2024-11-22 19:21:47,741 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-22 19:21:47,762 main DEBUG Took 0.017724 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-22 19:21:47,762 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-22 19:21:47,763 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-22 19:21:47,764 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-22 19:21:47,766 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 19:21:47,776 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-22 19:21:47,795 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 19:21:47,797 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 19:21:47,798 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 19:21:47,798 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 19:21:47,799 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 19:21:47,799 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 19:21:47,801 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 19:21:47,801 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 19:21:47,802 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 19:21:47,802 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 19:21:47,804 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 19:21:47,804 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 19:21:47,805 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 19:21:47,805 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-22 19:21:47,806 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 19:21:47,806 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 19:21:47,807 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 19:21:47,807 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 19:21:47,808 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 19:21:47,808 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 19:21:47,809 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 19:21:47,810 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 19:21:47,810 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 19:21:47,811 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 19:21:47,811 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 19:21:47,812 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-22 19:21:47,814 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 19:21:47,816 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-22 19:21:47,819 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-22 19:21:47,819 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-22 19:21:47,821 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-22 19:21:47,822 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-22 19:21:47,834 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-22 19:21:47,837 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-22 19:21:47,840 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-22 19:21:47,840 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-22 19:21:47,841 main DEBUG createAppenders(={Console}) 2024-11-22 19:21:47,842 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-11-22 19:21:47,842 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-22 19:21:47,842 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-11-22 19:21:47,843 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-22 19:21:47,844 main DEBUG OutputStream closed 2024-11-22 19:21:47,844 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-22 19:21:47,844 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-22 19:21:47,845 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-11-22 19:21:47,948 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-22 19:21:47,951 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-22 19:21:47,953 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-22 19:21:47,954 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-22 19:21:47,955 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-22 19:21:47,955 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-22 19:21:47,956 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-22 19:21:47,956 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-22 19:21:47,957 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-22 19:21:47,957 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-22 19:21:47,958 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-22 19:21:47,958 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-22 19:21:47,958 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-22 19:21:47,959 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-22 19:21:47,959 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-22 19:21:47,960 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-22 19:21:47,960 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-22 19:21:47,961 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-22 19:21:47,965 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-22 19:21:47,965 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-11-22 19:21:47,966 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-22 19:21:47,967 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-11-22T19:21:48,425 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261 2024-11-22 19:21:48,430 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-22 19:21:48,430 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-22T19:21:48,444 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-11-22T19:21:48,477 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T19:21:48,481 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/cluster_36b2230a-feb7-e71f-de54-94974790eab9, deleteOnExit=true 2024-11-22T19:21:48,482 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-11-22T19:21:48,483 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/test.cache.data in system properties and HBase conf 2024-11-22T19:21:48,484 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T19:21:48,484 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/hadoop.log.dir in system properties and HBase conf 2024-11-22T19:21:48,485 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T19:21:48,486 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T19:21:48,486 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-11-22T19:21:48,623 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-22T19:21:48,750 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-22T19:21:48,755 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T19:21:48,756 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T19:21:48,757 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T19:21:48,757 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T19:21:48,758 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T19:21:48,759 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T19:21:48,759 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T19:21:48,760 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T19:21:48,761 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T19:21:48,761 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/nfs.dump.dir in system properties and HBase conf 2024-11-22T19:21:48,762 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/java.io.tmpdir in system properties and HBase conf 2024-11-22T19:21:48,763 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T19:21:48,763 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T19:21:48,764 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T19:21:49,876 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-22T19:21:49,996 INFO [Time-limited test {}] log.Log(170): Logging initialized @3406ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-22T19:21:50,110 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T19:21:50,242 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T19:21:50,289 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T19:21:50,289 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T19:21:50,291 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T19:21:50,313 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T19:21:50,316 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/hadoop.log.dir/,AVAILABLE} 2024-11-22T19:21:50,318 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T19:21:50,595 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/java.io.tmpdir/jetty-localhost-35239-hadoop-hdfs-3_4_1-tests_jar-_-any-15079731750736623234/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T19:21:50,607 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:35239} 2024-11-22T19:21:50,608 INFO [Time-limited test {}] server.Server(415): Started @4018ms 2024-11-22T19:21:51,204 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T19:21:51,214 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T19:21:51,228 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T19:21:51,229 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T19:21:51,229 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T19:21:51,230 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/hadoop.log.dir/,AVAILABLE} 2024-11-22T19:21:51,231 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T19:21:51,400 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/java.io.tmpdir/jetty-localhost-41539-hadoop-hdfs-3_4_1-tests_jar-_-any-5229935684980641264/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T19:21:51,402 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:41539} 2024-11-22T19:21:51,402 INFO [Time-limited test {}] server.Server(415): Started @4813ms 2024-11-22T19:21:51,480 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T19:21:52,189 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/cluster_36b2230a-feb7-e71f-de54-94974790eab9/dfs/data/data1/current/BP-1459558550-172.17.0.2-1732303309555/current, will proceed with Du for space computation calculation, 2024-11-22T19:21:52,201 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/cluster_36b2230a-feb7-e71f-de54-94974790eab9/dfs/data/data2/current/BP-1459558550-172.17.0.2-1732303309555/current, will proceed with Du for space computation calculation, 2024-11-22T19:21:52,265 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T19:21:52,325 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc6751df1a64e4e87 with lease ID 0x5d7cb419712e5000: Processing first storage report for DS-976996e2-ae24-4859-b807-f87a3adaf7ac from datanode DatanodeRegistration(127.0.0.1:41091, datanodeUuid=58be2d5c-3ced-4168-9580-e2f13ac1bdd8, infoPort=34533, infoSecurePort=0, ipcPort=43381, storageInfo=lv=-57;cid=testClusterID;nsid=628515874;c=1732303309555) 2024-11-22T19:21:52,326 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc6751df1a64e4e87 with lease ID 0x5d7cb419712e5000: from storage DS-976996e2-ae24-4859-b807-f87a3adaf7ac node DatanodeRegistration(127.0.0.1:41091, datanodeUuid=58be2d5c-3ced-4168-9580-e2f13ac1bdd8, infoPort=34533, infoSecurePort=0, ipcPort=43381, storageInfo=lv=-57;cid=testClusterID;nsid=628515874;c=1732303309555), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-22T19:21:52,327 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc6751df1a64e4e87 with lease ID 0x5d7cb419712e5000: Processing first storage report for DS-05fe9c4b-3b12-41f3-83b7-7ca47b7a2482 from datanode DatanodeRegistration(127.0.0.1:41091, datanodeUuid=58be2d5c-3ced-4168-9580-e2f13ac1bdd8, infoPort=34533, infoSecurePort=0, ipcPort=43381, storageInfo=lv=-57;cid=testClusterID;nsid=628515874;c=1732303309555) 2024-11-22T19:21:52,327 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc6751df1a64e4e87 with lease ID 0x5d7cb419712e5000: from storage DS-05fe9c4b-3b12-41f3-83b7-7ca47b7a2482 node DatanodeRegistration(127.0.0.1:41091, datanodeUuid=58be2d5c-3ced-4168-9580-e2f13ac1bdd8, infoPort=34533, infoSecurePort=0, ipcPort=43381, storageInfo=lv=-57;cid=testClusterID;nsid=628515874;c=1732303309555), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T19:21:52,406 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261 
2024-11-22T19:21:52,505 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/cluster_36b2230a-feb7-e71f-de54-94974790eab9/zookeeper_0, clientPort=57120, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/cluster_36b2230a-feb7-e71f-de54-94974790eab9/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/cluster_36b2230a-feb7-e71f-de54-94974790eab9/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T19:21:52,519 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=57120 2024-11-22T19:21:52,536 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T19:21:52,540 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T19:21:52,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741825_1001 (size=7) 2024-11-22T19:21:53,287 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982 with version=8 2024-11-22T19:21:53,288 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/hbase-staging 2024-11-22T19:21:53,452 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-22T19:21:53,801 INFO [Time-limited test {}] client.ConnectionUtils(129): master/a307a1377457:0 server-side Connection retries=45 2024-11-22T19:21:53,824 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T19:21:53,824 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T19:21:53,825 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T19:21:53,825 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T19:21:53,825 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T19:21:54,018 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T19:21:54,152 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-22T19:21:54,169 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-22T19:21:54,175 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T19:21:54,223 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 25076 (auto-detected) 2024-11-22T19:21:54,224 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-22T19:21:54,249 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:38701 2024-11-22T19:21:54,260 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T19:21:54,264 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T19:21:54,281 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:38701 connecting to ZooKeeper ensemble=127.0.0.1:57120 2024-11-22T19:21:54,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:387010x0, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T19:21:54,337 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38701-0x10020ae8f450000 connected 2024-11-22T19:21:54,485 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T19:21:54,491 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T19:21:54,495 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T19:21:54,504 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38701 2024-11-22T19:21:54,516 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38701 2024-11-22T19:21:54,518 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38701 2024-11-22T19:21:54,528 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38701 2024-11-22T19:21:54,528 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38701 
2024-11-22T19:21:54,537 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982, hbase.cluster.distributed=false 2024-11-22T19:21:54,659 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/a307a1377457:0 server-side Connection retries=45 2024-11-22T19:21:54,659 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T19:21:54,660 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T19:21:54,660 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T19:21:54,660 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T19:21:54,660 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T19:21:54,664 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T19:21:54,667 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T19:21:54,677 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:35917 2024-11-22T19:21:54,682 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T19:21:54,691 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T19:21:54,693 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T19:21:54,697 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T19:21:54,702 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:35917 connecting to ZooKeeper ensemble=127.0.0.1:57120 2024-11-22T19:21:54,713 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:359170x0, quorum=127.0.0.1:57120, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T19:21:54,716 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:359170x0, quorum=127.0.0.1:57120, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T19:21:54,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:359170x0, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T19:21:54,718 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:359170x0, quorum=127.0.0.1:57120, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T19:21:54,724 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35917-0x10020ae8f450001 connected 2024-11-22T19:21:54,732 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35917 2024-11-22T19:21:54,736 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35917 2024-11-22T19:21:54,737 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35917 2024-11-22T19:21:54,740 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35917 2024-11-22T19:21:54,740 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35917 2024-11-22T19:21:54,747 INFO [master/a307a1377457:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/a307a1377457,38701,1732303313442 2024-11-22T19:21:54,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35917-0x10020ae8f450001, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T19:21:54,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T19:21:54,758 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a307a1377457,38701,1732303313442 2024-11-22T19:21:54,769 DEBUG [M:0;a307a1377457:38701 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a307a1377457:38701 2024-11-22T19:21:54,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35917-0x10020ae8f450001, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T19:21:54,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T19:21:54,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35917-0x10020ae8f450001, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T19:21:54,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T19:21:54,795 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T19:21:54,797 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:38701-0x10020ae8f450000, 
quorum=127.0.0.1:57120, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T19:21:54,797 INFO [master/a307a1377457:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a307a1377457,38701,1732303313442 from backup master directory 2024-11-22T19:21:54,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a307a1377457,38701,1732303313442 2024-11-22T19:21:54,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T19:21:54,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35917-0x10020ae8f450001, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T19:21:54,804 WARN [master/a307a1377457:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T19:21:54,805 INFO [master/a307a1377457:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a307a1377457,38701,1732303313442 2024-11-22T19:21:54,808 INFO [master/a307a1377457:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-22T19:21:54,809 INFO [master/a307a1377457:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-22T19:21:54,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741826_1002 (size=42) 2024-11-22T19:21:55,310 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/hbase.id with ID: 856b0a0b-4f32-4c66-b249-fdd5f534b1f8 2024-11-22T19:21:55,364 INFO [master/a307a1377457:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T19:21:55,402 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35917-0x10020ae8f450001, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T19:21:55,402 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T19:21:55,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741827_1003 (size=196) 2024-11-22T19:21:55,455 INFO [master/a307a1377457:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T19:21:55,458 INFO [master/a307a1377457:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T19:21:55,482 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:21:55,488 INFO [master/a307a1377457:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-22T19:21:55,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741828_1004 (size=1189) 2024-11-22T19:21:55,561 INFO [master/a307a1377457:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/data/master/store 2024-11-22T19:21:55,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741829_1005 (size=34) 2024-11-22T19:21:56,003 INFO [master/a307a1377457:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-11-22T19:21:56,004 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T19:21:56,006 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T19:21:56,006 INFO [master/a307a1377457:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T19:21:56,006 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T19:21:56,007 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T19:21:56,007 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T19:21:56,007 INFO [master/a307a1377457:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T19:21:56,007 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-22T19:21:56,011 WARN [master/a307a1377457:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/data/master/store/.initializing 2024-11-22T19:21:56,011 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/WALs/a307a1377457,38701,1732303313442 2024-11-22T19:21:56,020 INFO [master/a307a1377457:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-22T19:21:56,034 INFO [master/a307a1377457:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a307a1377457%2C38701%2C1732303313442, suffix=, logDir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/WALs/a307a1377457,38701,1732303313442, archiveDir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/oldWALs, maxLogs=10 2024-11-22T19:21:56,067 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/WALs/a307a1377457,38701,1732303313442/a307a1377457%2C38701%2C1732303313442.1732303316041, exclude list is [], retry=0 2024-11-22T19:21:56,091 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41091,DS-976996e2-ae24-4859-b807-f87a3adaf7ac,DISK] 2024-11-22T19:21:56,095 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-11-22T19:21:56,148 INFO [master/a307a1377457:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/WALs/a307a1377457,38701,1732303313442/a307a1377457%2C38701%2C1732303313442.1732303316041 2024-11-22T19:21:56,149 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34533:34533)] 2024-11-22T19:21:56,150 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T19:21:56,151 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T19:21:56,155 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T19:21:56,157 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T19:21:56,213 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T19:21:56,246 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T19:21:56,251 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:21:56,255 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T19:21:56,256 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T19:21:56,260 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T19:21:56,260 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:21:56,262 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:21:56,262 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T19:21:56,266 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T19:21:56,266 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:21:56,268 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:21:56,268 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T19:21:56,272 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T19:21:56,273 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:21:56,274 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:21:56,280 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T19:21:56,281 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T19:21:56,291 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T19:21:56,297 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T19:21:56,304 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T19:21:56,305 INFO [master/a307a1377457:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72589334, jitterRate=0.08166536688804626}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T19:21:56,310 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-22T19:21:56,311 INFO [master/a307a1377457:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T19:21:56,343 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b7d8c36, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:21:56,388 INFO [master/a307a1377457:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
2024-11-22T19:21:56,402 INFO [master/a307a1377457:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T19:21:56,402 INFO [master/a307a1377457:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T19:21:56,404 INFO [master/a307a1377457:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T19:21:56,406 INFO [master/a307a1377457:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-11-22T19:21:56,413 INFO [master/a307a1377457:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 6 msec 2024-11-22T19:21:56,413 INFO [master/a307a1377457:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T19:21:56,443 INFO [master/a307a1377457:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T19:21:56,460 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T19:21:56,464 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-22T19:21:56,467 INFO [master/a307a1377457:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T19:21:56,468 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T19:21:56,472 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-22T19:21:56,474 INFO [master/a307a1377457:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T19:21:56,478 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T19:21:56,480 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-22T19:21:56,481 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T19:21:56,483 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T19:21:56,494 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T19:21:56,495 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T19:21:56,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T19:21:56,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35917-0x10020ae8f450001, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T19:21:56,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T19:21:56,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35917-0x10020ae8f450001, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T19:21:56,501 INFO [master/a307a1377457:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=a307a1377457,38701,1732303313442, sessionid=0x10020ae8f450000, setting cluster-up flag (Was=false) 2024-11-22T19:21:56,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35917-0x10020ae8f450001, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T19:21:56,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T19:21:56,525 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T19:21:56,527 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a307a1377457,38701,1732303313442 2024-11-22T19:21:56,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35917-0x10020ae8f450001, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T19:21:56,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T19:21:56,541 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T19:21:56,546 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a307a1377457,38701,1732303313442 2024-11-22T19:21:56,643 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure 
table=hbase:meta 2024-11-22T19:21:56,653 INFO [master/a307a1377457:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-22T19:21:56,657 INFO [master/a307a1377457:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T19:21:56,661 DEBUG [RS:0;a307a1377457:35917 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a307a1377457:35917 2024-11-22T19:21:56,663 INFO [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(1008): ClusterId : 856b0a0b-4f32-4c66-b249-fdd5f534b1f8 2024-11-22T19:21:56,666 DEBUG [RS:0;a307a1377457:35917 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T19:21:56,665 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a307a1377457,38701,1732303313442 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T19:21:56,669 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a307a1377457:0, corePoolSize=5, maxPoolSize=5 2024-11-22T19:21:56,669 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a307a1377457:0, corePoolSize=5, maxPoolSize=5 2024-11-22T19:21:56,669 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a307a1377457:0, corePoolSize=5, maxPoolSize=5 2024-11-22T19:21:56,669 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a307a1377457:0, corePoolSize=5, maxPoolSize=5 2024-11-22T19:21:56,670 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a307a1377457:0, corePoolSize=10, maxPoolSize=10 2024-11-22T19:21:56,670 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a307a1377457:0, corePoolSize=1, maxPoolSize=1 2024-11-22T19:21:56,670 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a307a1377457:0, corePoolSize=2, maxPoolSize=2 2024-11-22T19:21:56,670 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a307a1377457:0, corePoolSize=1, maxPoolSize=1 2024-11-22T19:21:56,672 INFO [master/a307a1377457:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; 
timeout=30000, timestamp=1732303346672 2024-11-22T19:21:56,672 DEBUG [RS:0;a307a1377457:35917 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T19:21:56,672 DEBUG [RS:0;a307a1377457:35917 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T19:21:56,674 INFO [master/a307a1377457:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T19:21:56,676 INFO [master/a307a1377457:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T19:21:56,677 DEBUG [RS:0;a307a1377457:35917 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T19:21:56,678 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-22T19:21:56,678 DEBUG [RS:0;a307a1377457:35917 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cae2056, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:21:56,678 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-22T19:21:56,680 INFO [master/a307a1377457:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T19:21:56,681 INFO [master/a307a1377457:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T19:21:56,681 INFO [master/a307a1377457:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T19:21:56,681 DEBUG [RS:0;a307a1377457:35917 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@536dc15b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a307a1377457/172.17.0.2:0 2024-11-22T19:21:56,681 INFO [master/a307a1377457:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T19:21:56,682 INFO [master/a307a1377457:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-22T19:21:56,685 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:21:56,685 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T19:21:56,687 INFO [RS:0;a307a1377457:35917 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-22T19:21:56,687 INFO [RS:0;a307a1377457:35917 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-22T19:21:56,687 DEBUG [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-11-22T19:21:56,690 INFO [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(3073): reportForDuty to master=a307a1377457,38701,1732303313442 with isa=a307a1377457/172.17.0.2:35917, startcode=1732303314657 2024-11-22T19:21:56,692 INFO [master/a307a1377457:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T19:21:56,694 INFO [master/a307a1377457:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T19:21:56,694 INFO [master/a307a1377457:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T19:21:56,704 INFO [master/a307a1377457:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T19:21:56,705 INFO [master/a307a1377457:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T19:21:56,706 DEBUG [RS:0;a307a1377457:35917 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T19:21:56,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741831_1007 (size=1039) 2024-11-22T19:21:56,720 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a307a1377457:0:becomeActiveMaster-HFileCleaner.large.0-1732303316707,5,FailOnTimeoutGroup] 2024-11-22T19:21:56,723 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a307a1377457:0:becomeActiveMaster-HFileCleaner.small.0-1732303316720,5,FailOnTimeoutGroup] 2024-11-22T19:21:56,724 INFO [master/a307a1377457:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T19:21:56,724 INFO [master/a307a1377457:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T19:21:56,728 INFO [master/a307a1377457:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T19:21:56,728 INFO [master/a307a1377457:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-22T19:21:56,760 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34143, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T19:21:56,769 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38701 {}] master.ServerManager(332): Checking decommissioned status of RegionServer a307a1377457,35917,1732303314657 2024-11-22T19:21:56,772 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38701 {}] master.ServerManager(486): Registering regionserver=a307a1377457,35917,1732303314657 2024-11-22T19:21:56,791 DEBUG [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982 2024-11-22T19:21:56,791 DEBUG [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:44823 2024-11-22T19:21:56,791 DEBUG [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-22T19:21:56,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T19:21:56,798 DEBUG [RS:0;a307a1377457:35917 {}] zookeeper.ZKUtil(111): regionserver:35917-0x10020ae8f450001, quorum=127.0.0.1:57120, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a307a1377457,35917,1732303314657 2024-11-22T19:21:56,798 WARN [RS:0;a307a1377457:35917 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T19:21:56,798 INFO [RS:0;a307a1377457:35917 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-22T19:21:56,798 DEBUG [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/WALs/a307a1377457,35917,1732303314657 2024-11-22T19:21:56,802 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a307a1377457,35917,1732303314657] 2024-11-22T19:21:56,819 DEBUG [RS:0;a307a1377457:35917 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-22T19:21:56,834 INFO [RS:0;a307a1377457:35917 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T19:21:56,851 INFO [RS:0;a307a1377457:35917 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T19:21:56,854 INFO [RS:0;a307a1377457:35917 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T19:21:56,854 INFO [RS:0;a307a1377457:35917 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-22T19:21:56,855 INFO [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-22T19:21:56,863 INFO [RS:0;a307a1377457:35917 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T19:21:56,864 DEBUG [RS:0;a307a1377457:35917 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a307a1377457:0, corePoolSize=1, maxPoolSize=1 2024-11-22T19:21:56,864 DEBUG [RS:0;a307a1377457:35917 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a307a1377457:0, corePoolSize=1, maxPoolSize=1 2024-11-22T19:21:56,865 DEBUG [RS:0;a307a1377457:35917 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a307a1377457:0, corePoolSize=1, maxPoolSize=1 2024-11-22T19:21:56,865 DEBUG [RS:0;a307a1377457:35917 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a307a1377457:0, corePoolSize=1, maxPoolSize=1 2024-11-22T19:21:56,865 DEBUG [RS:0;a307a1377457:35917 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a307a1377457:0, corePoolSize=1, maxPoolSize=1 2024-11-22T19:21:56,865 DEBUG [RS:0;a307a1377457:35917 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a307a1377457:0, corePoolSize=2, maxPoolSize=2 2024-11-22T19:21:56,865 DEBUG [RS:0;a307a1377457:35917 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a307a1377457:0, corePoolSize=1, maxPoolSize=1 2024-11-22T19:21:56,866 DEBUG [RS:0;a307a1377457:35917 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a307a1377457:0, corePoolSize=1, maxPoolSize=1 2024-11-22T19:21:56,866 DEBUG [RS:0;a307a1377457:35917 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a307a1377457:0, corePoolSize=1, maxPoolSize=1 2024-11-22T19:21:56,866 DEBUG [RS:0;a307a1377457:35917 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a307a1377457:0, corePoolSize=1, maxPoolSize=1 2024-11-22T19:21:56,866 DEBUG [RS:0;a307a1377457:35917 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a307a1377457:0, corePoolSize=1, maxPoolSize=1 2024-11-22T19:21:56,867 DEBUG [RS:0;a307a1377457:35917 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a307a1377457:0, corePoolSize=3, maxPoolSize=3 2024-11-22T19:21:56,867 DEBUG [RS:0;a307a1377457:35917 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0, corePoolSize=3, maxPoolSize=3 2024-11-22T19:21:56,868 INFO [RS:0;a307a1377457:35917 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T19:21:56,868 INFO [RS:0;a307a1377457:35917 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T19:21:56,869 INFO [RS:0;a307a1377457:35917 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-22T19:21:56,869 INFO [RS:0;a307a1377457:35917 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T19:21:56,869 INFO [RS:0;a307a1377457:35917 {}] hbase.ChoreService(168): Chore ScheduledChore name=a307a1377457,35917,1732303314657-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T19:21:56,900 INFO [RS:0;a307a1377457:35917 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T19:21:56,903 INFO [RS:0;a307a1377457:35917 {}] hbase.ChoreService(168): Chore ScheduledChore name=a307a1377457,35917,1732303314657-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T19:21:56,933 INFO [RS:0;a307a1377457:35917 {}] regionserver.Replication(204): a307a1377457,35917,1732303314657 started 2024-11-22T19:21:56,933 INFO [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(1767): Serving as a307a1377457,35917,1732303314657, RpcServer on a307a1377457/172.17.0.2:35917, sessionid=0x10020ae8f450001 2024-11-22T19:21:56,934 DEBUG [RS:0;a307a1377457:35917 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T19:21:56,934 DEBUG [RS:0;a307a1377457:35917 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a307a1377457,35917,1732303314657 2024-11-22T19:21:56,935 DEBUG [RS:0;a307a1377457:35917 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a307a1377457,35917,1732303314657' 2024-11-22T19:21:56,935 DEBUG [RS:0;a307a1377457:35917 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T19:21:56,936 DEBUG [RS:0;a307a1377457:35917 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T19:21:56,937 DEBUG [RS:0;a307a1377457:35917 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T19:21:56,937 DEBUG [RS:0;a307a1377457:35917 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T19:21:56,937 DEBUG [RS:0;a307a1377457:35917 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a307a1377457,35917,1732303314657 2024-11-22T19:21:56,937 DEBUG [RS:0;a307a1377457:35917 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a307a1377457,35917,1732303314657' 2024-11-22T19:21:56,937 DEBUG [RS:0;a307a1377457:35917 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T19:21:56,938 DEBUG [RS:0;a307a1377457:35917 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T19:21:56,939 DEBUG [RS:0;a307a1377457:35917 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T19:21:56,939 INFO [RS:0;a307a1377457:35917 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T19:21:56,939 INFO [RS:0;a307a1377457:35917 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-22T19:21:57,047 INFO [RS:0;a307a1377457:35917 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-22T19:21:57,052 INFO [RS:0;a307a1377457:35917 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a307a1377457%2C35917%2C1732303314657, suffix=, logDir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/WALs/a307a1377457,35917,1732303314657, archiveDir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/oldWALs, maxLogs=32 2024-11-22T19:21:57,071 DEBUG [RS:0;a307a1377457:35917 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/WALs/a307a1377457,35917,1732303314657/a307a1377457%2C35917%2C1732303314657.1732303317054, exclude list is [], retry=0 2024-11-22T19:21:57,077 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41091,DS-976996e2-ae24-4859-b807-f87a3adaf7ac,DISK] 2024-11-22T19:21:57,081 INFO [RS:0;a307a1377457:35917 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/WALs/a307a1377457,35917,1732303314657/a307a1377457%2C35917%2C1732303314657.1732303317054 2024-11-22T19:21:57,082 DEBUG [RS:0;a307a1377457:35917 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34533:34533)] 2024-11-22T19:21:57,119 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-22T19:21:57,119 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982 2024-11-22T19:21:57,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741833_1009 (size=32) 2024-11-22T19:21:57,536 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T19:21:57,540 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T19:21:57,543 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T19:21:57,544 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:21:57,545 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T19:21:57,545 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T19:21:57,548 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T19:21:57,548 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:21:57,550 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T19:21:57,550 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T19:21:57,553 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T19:21:57,553 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:21:57,554 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T19:21:57,556 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/meta/1588230740 2024-11-22T19:21:57,557 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/meta/1588230740 2024-11-22T19:21:57,560 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-22T19:21:57,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-22T19:21:57,569 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T19:21:57,570 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70040628, jitterRate=0.04368668794631958}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T19:21:57,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-22T19:21:57,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-22T19:21:57,573 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-22T19:21:57,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-22T19:21:57,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T19:21:57,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T19:21:57,575 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-22T19:21:57,575 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-22T19:21:57,578 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-22T19:21:57,578 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-22T19:21:57,586 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T19:21:57,597 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T19:21:57,600 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T19:21:57,753 DEBUG [a307a1377457:38701 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T19:21:57,758 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:21:57,764 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a307a1377457,35917,1732303314657, state=OPENING 2024-11-22T19:21:57,771 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T19:21:57,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35917-0x10020ae8f450001, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T19:21:57,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T19:21:57,779 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T19:21:57,779 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T19:21:57,782 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=a307a1377457,35917,1732303314657}] 2024-11-22T19:21:57,960 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:21:57,962 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T19:21:57,967 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47166, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T19:21:57,982 INFO [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-22T19:21:57,982 INFO [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-22T19:21:57,983 INFO [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-22T19:21:57,992 INFO [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a307a1377457%2C35917%2C1732303314657.meta, suffix=.meta, logDir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/WALs/a307a1377457,35917,1732303314657, archiveDir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/oldWALs, maxLogs=32 2024-11-22T19:21:58,012 DEBUG [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/WALs/a307a1377457,35917,1732303314657/a307a1377457%2C35917%2C1732303314657.meta.1732303317995.meta, exclude list is [], retry=0 2024-11-22T19:21:58,017 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41091,DS-976996e2-ae24-4859-b807-f87a3adaf7ac,DISK] 2024-11-22T19:21:58,022 INFO [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/WALs/a307a1377457,35917,1732303314657/a307a1377457%2C35917%2C1732303314657.meta.1732303317995.meta 2024-11-22T19:21:58,022 DEBUG [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with 
pipeline: [(127.0.0.1/127.0.0.1:34533:34533)] 2024-11-22T19:21:58,023 DEBUG [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T19:21:58,025 DEBUG [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T19:21:58,111 DEBUG [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T19:21:58,118 INFO [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-22T19:21:58,124 DEBUG [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T19:21:58,124 DEBUG [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T19:21:58,124 DEBUG [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-22T19:21:58,124 DEBUG [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-22T19:21:58,144 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T19:21:58,148 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T19:21:58,148 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:21:58,150 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T19:21:58,150 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T19:21:58,159 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T19:21:58,159 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:21:58,160 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T19:21:58,161 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T19:21:58,163 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T19:21:58,164 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:21:58,166 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T19:21:58,169 DEBUG [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/meta/1588230740 2024-11-22T19:21:58,173 DEBUG [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/meta/1588230740 2024-11-22T19:21:58,178 DEBUG [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T19:21:58,181 DEBUG [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-22T19:21:58,183 INFO [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67108327, jitterRate=-8.001923561096191E-6}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T19:21:58,186 DEBUG [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-22T19:21:58,196 INFO [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732303317953 2024-11-22T19:21:58,213 DEBUG [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T19:21:58,214 INFO [RS_OPEN_META-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-22T19:21:58,215 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:21:58,218 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a307a1377457,35917,1732303314657, state=OPEN 2024-11-22T19:21:58,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35917-0x10020ae8f450001, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T19:21:58,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T19:21:58,229 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T19:21:58,229 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T19:21:58,239 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T19:21:58,239 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=a307a1377457,35917,1732303314657 in 447 msec 2024-11-22T19:21:58,248 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T19:21:58,248 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 654 msec 2024-11-22T19:21:58,256 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.6640 sec 2024-11-22T19:21:58,257 INFO [master/a307a1377457:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732303318257, completionTime=-1 2024-11-22T19:21:58,257 INFO [master/a307a1377457:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T19:21:58,258 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-22T19:21:58,304 DEBUG [hconnection-0x4db3c113-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:21:58,307 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47174, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:21:58,319 INFO [master/a307a1377457:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-22T19:21:58,320 INFO [master/a307a1377457:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732303378319 2024-11-22T19:21:58,320 INFO [master/a307a1377457:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732303438320 2024-11-22T19:21:58,320 INFO [master/a307a1377457:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 62 msec 2024-11-22T19:21:58,348 INFO [master/a307a1377457:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a307a1377457,38701,1732303313442-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T19:21:58,349 INFO [master/a307a1377457:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a307a1377457,38701,1732303313442-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T19:21:58,349 INFO [master/a307a1377457:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a307a1377457,38701,1732303313442-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T19:21:58,352 INFO [master/a307a1377457:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a307a1377457:38701, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T19:21:58,352 INFO [master/a307a1377457:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T19:21:58,358 DEBUG [master/a307a1377457:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-22T19:21:58,361 INFO [master/a307a1377457:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-11-22T19:21:58,363 INFO [master/a307a1377457:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T19:21:58,370 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-22T19:21:58,374 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T19:21:58,376 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:21:58,378 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T19:21:58,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741835_1011 (size=358) 2024-11-22T19:21:58,797 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 45aa664165800f3151e26f1a3610c687, NAME => 'hbase:namespace,,1732303318362.45aa664165800f3151e26f1a3610c687.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982 2024-11-22T19:21:58,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741836_1012 (size=42) 2024-11-22T19:21:59,230 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732303318362.45aa664165800f3151e26f1a3610c687.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T19:21:59,230 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 45aa664165800f3151e26f1a3610c687, disabling compactions & flushes 2024-11-22T19:21:59,231 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732303318362.45aa664165800f3151e26f1a3610c687. 2024-11-22T19:21:59,231 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732303318362.45aa664165800f3151e26f1a3610c687. 2024-11-22T19:21:59,231 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732303318362.45aa664165800f3151e26f1a3610c687. 
after waiting 0 ms 2024-11-22T19:21:59,231 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732303318362.45aa664165800f3151e26f1a3610c687. 2024-11-22T19:21:59,231 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1732303318362.45aa664165800f3151e26f1a3610c687. 2024-11-22T19:21:59,231 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 45aa664165800f3151e26f1a3610c687: 2024-11-22T19:21:59,234 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T19:21:59,243 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1732303318362.45aa664165800f3151e26f1a3610c687.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1732303319236"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732303319236"}]},"ts":"1732303319236"} 2024-11-22T19:21:59,271 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-22T19:21:59,274 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T19:21:59,277 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303319274"}]},"ts":"1732303319274"} 2024-11-22T19:21:59,291 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-22T19:21:59,299 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=45aa664165800f3151e26f1a3610c687, ASSIGN}] 2024-11-22T19:21:59,303 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=45aa664165800f3151e26f1a3610c687, ASSIGN 2024-11-22T19:21:59,306 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=45aa664165800f3151e26f1a3610c687, ASSIGN; state=OFFLINE, location=a307a1377457,35917,1732303314657; forceNewPlan=false, retain=false 2024-11-22T19:21:59,457 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=45aa664165800f3151e26f1a3610c687, regionState=OPENING, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:21:59,462 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 45aa664165800f3151e26f1a3610c687, server=a307a1377457,35917,1732303314657}] 2024-11-22T19:21:59,619 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:21:59,627 INFO [RS_OPEN_PRIORITY_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1732303318362.45aa664165800f3151e26f1a3610c687. 2024-11-22T19:21:59,628 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 45aa664165800f3151e26f1a3610c687, NAME => 'hbase:namespace,,1732303318362.45aa664165800f3151e26f1a3610c687.', STARTKEY => '', ENDKEY => ''} 2024-11-22T19:21:59,628 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 45aa664165800f3151e26f1a3610c687 2024-11-22T19:21:59,629 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732303318362.45aa664165800f3151e26f1a3610c687.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T19:21:59,629 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 45aa664165800f3151e26f1a3610c687 2024-11-22T19:21:59,629 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 45aa664165800f3151e26f1a3610c687 2024-11-22T19:21:59,632 INFO [StoreOpener-45aa664165800f3151e26f1a3610c687-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 45aa664165800f3151e26f1a3610c687 2024-11-22T19:21:59,636 INFO [StoreOpener-45aa664165800f3151e26f1a3610c687-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 45aa664165800f3151e26f1a3610c687 columnFamilyName info 2024-11-22T19:21:59,636 DEBUG [StoreOpener-45aa664165800f3151e26f1a3610c687-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:21:59,637 INFO [StoreOpener-45aa664165800f3151e26f1a3610c687-1 {}] regionserver.HStore(327): Store=45aa664165800f3151e26f1a3610c687/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:21:59,639 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/namespace/45aa664165800f3151e26f1a3610c687 2024-11-22T19:21:59,640 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/namespace/45aa664165800f3151e26f1a3610c687 2024-11-22T19:21:59,645 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 45aa664165800f3151e26f1a3610c687 2024-11-22T19:21:59,649 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/namespace/45aa664165800f3151e26f1a3610c687/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T19:21:59,651 INFO [RS_OPEN_PRIORITY_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 45aa664165800f3151e26f1a3610c687; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61818559, jitterRate=-0.07883168756961823}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T19:21:59,653 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 45aa664165800f3151e26f1a3610c687: 2024-11-22T19:21:59,656 INFO [RS_OPEN_PRIORITY_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1732303318362.45aa664165800f3151e26f1a3610c687., pid=6, masterSystemTime=1732303319619 2024-11-22T19:21:59,660 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1732303318362.45aa664165800f3151e26f1a3610c687. 2024-11-22T19:21:59,661 INFO [RS_OPEN_PRIORITY_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1732303318362.45aa664165800f3151e26f1a3610c687. 
2024-11-22T19:21:59,662 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=45aa664165800f3151e26f1a3610c687, regionState=OPEN, openSeqNum=2, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:21:59,674 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T19:21:59,676 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 45aa664165800f3151e26f1a3610c687, server=a307a1377457,35917,1732303314657 in 205 msec 2024-11-22T19:21:59,680 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T19:21:59,680 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=45aa664165800f3151e26f1a3610c687, ASSIGN in 375 msec 2024-11-22T19:21:59,682 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T19:21:59,682 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303319682"}]},"ts":"1732303319682"} 2024-11-22T19:21:59,686 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-22T19:21:59,690 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T19:21:59,694 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.3270 sec 2024-11-22T19:21:59,776 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-22T19:21:59,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35917-0x10020ae8f450001, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T19:21:59,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-22T19:21:59,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T19:21:59,812 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-22T19:21:59,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-22T19:21:59,845 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 35 msec 2024-11-22T19:21:59,858 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-22T19:21:59,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-22T19:21:59,878 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 19 msec 2024-11-22T19:21:59,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-22T19:21:59,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-22T19:21:59,888 INFO [master/a307a1377457:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 5.083sec 2024-11-22T19:21:59,890 INFO [master/a307a1377457:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T19:21:59,892 INFO [master/a307a1377457:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T19:21:59,893 INFO [master/a307a1377457:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T19:21:59,894 INFO [master/a307a1377457:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T19:21:59,894 INFO [master/a307a1377457:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T19:21:59,895 INFO [master/a307a1377457:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a307a1377457,38701,1732303313442-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T19:21:59,896 INFO [master/a307a1377457:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a307a1377457,38701,1732303313442-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T19:21:59,904 DEBUG [master/a307a1377457:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-22T19:21:59,905 INFO [master/a307a1377457:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T19:21:59,905 INFO [master/a307a1377457:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a307a1377457,38701,1732303313442-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
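The two CreateNamespaceProcedure runs above (pid=7 for 'default', pid=8 for 'hbase') are the master bootstrapping its built-in namespaces. A client-initiated namespace create goes through the same master procedure; in the standard HBase Java client it looks roughly like the sketch below (the connection setup and the 'my_ns' name are illustrative assumptions, not taken from this run):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class CreateNamespaceExample {
      public static void main(String[] args) throws Exception {
        // Cluster settings are read from hbase-site.xml on the classpath.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Submits a CreateNamespaceProcedure to the active master, like pid=7/8 above.
          admin.createNamespace(NamespaceDescriptor.create("my_ns").build());
        }
      }
    }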
2024-11-22T19:21:59,965 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7e541e88 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e83c466 2024-11-22T19:21:59,965 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-22T19:21:59,975 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@305a704f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:21:59,980 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-22T19:21:59,980 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-22T19:21:59,992 DEBUG [hconnection-0x68773b0e-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:00,038 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47188, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:00,050 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=a307a1377457,38701,1732303313442 2024-11-22T19:22:00,085 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=218, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=399, ProcessCount=11, AvailableMemoryMB=4871 2024-11-22T19:22:00,100 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T19:22:00,109 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56710, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T19:22:00,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
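The TableDescriptorChecker warning just above fires because the incoming table request carries a memstore flush size of 131072 bytes (128 KB), far below the default; for an ACID-guarantees stress test a deliberately tiny flush size keeps flushes happening constantly. A per-table override of that kind sits on the table descriptor; a minimal sketch with the HBase 2.x builder API follows (the builder methods are standard, the wrapper class name is made up for illustration):

    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public final class TinyFlushSizeExample {
      // Copies an existing descriptor and sets the per-table flush size to 128 KB,
      // the value the TableDescriptorChecker warns about in the log line above.
      static TableDescriptor withTinyFlushSize(TableDescriptor base) {
        return TableDescriptorBuilder.newBuilder(base)
            .setMemStoreFlushSize(131072L)
            .build();
      }
    }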
2024-11-22T19:22:00,126 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T19:22:00,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-22T19:22:00,134 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T19:22:00,135 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:00,135 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-22T19:22:00,137 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T19:22:00,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-22T19:22:00,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741837_1013 (size=963) 2024-11-22T19:22:00,171 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982 2024-11-22T19:22:00,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741838_1014 (size=53) 2024-11-22T19:22:00,202 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T19:22:00,202 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 686ebaaf5a8e3b2d28eef9abb3c2302e, disabling compactions & flushes 2024-11-22T19:22:00,202 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:00,202 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:00,203 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. after waiting 0 ms 2024-11-22T19:22:00,203 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:00,203 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:00,203 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:00,206 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T19:22:00,206 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732303320206"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732303320206"}]},"ts":"1732303320206"} 2024-11-22T19:22:00,210 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
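The CreateTableProcedure above is driven by a descriptor with three column families (A, B, C) and the table-level metadata key 'hbase.hregion.compacting.memstore.type' set to 'ADAPTIVE'. Rebuilding that descriptor from a plain client would look roughly like this sketch using the standard HBase 2.x builder API (only the attributes visible in the log are set; everything else is left at its default):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateTestAcidGuarantees {
      static void create(Admin admin) throws java.io.IOException {
        TableDescriptorBuilder table =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Matches the TABLE_ATTRIBUTES METADATA entry in the create log line.
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String family : new String[] {"A", "B", "C"}) {
          ColumnFamilyDescriptor cf =
              ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                  .setMaxVersions(1)        // VERSIONS => '1'
                  .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536 B (64KB)'
                  .build();
          table.setColumnFamily(cf);
        }
        // Queues a CreateTableProcedure on the active master (pid=9 in this run).
        admin.createTable(table.build());
      }
    }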
2024-11-22T19:22:00,212 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T19:22:00,213 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303320212"}]},"ts":"1732303320212"} 2024-11-22T19:22:00,216 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-22T19:22:00,223 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=686ebaaf5a8e3b2d28eef9abb3c2302e, ASSIGN}] 2024-11-22T19:22:00,225 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=686ebaaf5a8e3b2d28eef9abb3c2302e, ASSIGN 2024-11-22T19:22:00,228 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=686ebaaf5a8e3b2d28eef9abb3c2302e, ASSIGN; state=OFFLINE, location=a307a1377457,35917,1732303314657; forceNewPlan=false, retain=false 2024-11-22T19:22:00,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-22T19:22:00,380 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=686ebaaf5a8e3b2d28eef9abb3c2302e, regionState=OPENING, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:22:00,384 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657}] 2024-11-22T19:22:00,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-22T19:22:00,538 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:00,546 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:00,546 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} 2024-11-22T19:22:00,547 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:00,547 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T19:22:00,547 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:00,547 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:00,551 INFO [StoreOpener-686ebaaf5a8e3b2d28eef9abb3c2302e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:00,555 INFO [StoreOpener-686ebaaf5a8e3b2d28eef9abb3c2302e-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:22:00,555 INFO [StoreOpener-686ebaaf5a8e3b2d28eef9abb3c2302e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 686ebaaf5a8e3b2d28eef9abb3c2302e columnFamilyName A 2024-11-22T19:22:00,555 DEBUG [StoreOpener-686ebaaf5a8e3b2d28eef9abb3c2302e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:00,556 INFO [StoreOpener-686ebaaf5a8e3b2d28eef9abb3c2302e-1 {}] regionserver.HStore(327): Store=686ebaaf5a8e3b2d28eef9abb3c2302e/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:22:00,557 INFO [StoreOpener-686ebaaf5a8e3b2d28eef9abb3c2302e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:00,559 INFO [StoreOpener-686ebaaf5a8e3b2d28eef9abb3c2302e-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:22:00,559 INFO [StoreOpener-686ebaaf5a8e3b2d28eef9abb3c2302e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 686ebaaf5a8e3b2d28eef9abb3c2302e columnFamilyName B 2024-11-22T19:22:00,559 DEBUG [StoreOpener-686ebaaf5a8e3b2d28eef9abb3c2302e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:00,560 INFO [StoreOpener-686ebaaf5a8e3b2d28eef9abb3c2302e-1 {}] regionserver.HStore(327): Store=686ebaaf5a8e3b2d28eef9abb3c2302e/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:22:00,561 INFO [StoreOpener-686ebaaf5a8e3b2d28eef9abb3c2302e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:00,562 INFO [StoreOpener-686ebaaf5a8e3b2d28eef9abb3c2302e-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:22:00,563 INFO [StoreOpener-686ebaaf5a8e3b2d28eef9abb3c2302e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 686ebaaf5a8e3b2d28eef9abb3c2302e columnFamilyName C 2024-11-22T19:22:00,563 DEBUG [StoreOpener-686ebaaf5a8e3b2d28eef9abb3c2302e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:00,564 INFO [StoreOpener-686ebaaf5a8e3b2d28eef9abb3c2302e-1 {}] regionserver.HStore(327): Store=686ebaaf5a8e3b2d28eef9abb3c2302e/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:22:00,564 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:00,566 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:00,567 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:00,571 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T19:22:00,574 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:00,578 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T19:22:00,579 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 686ebaaf5a8e3b2d28eef9abb3c2302e; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73646762, jitterRate=0.0974222719669342}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T19:22:00,581 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:00,582 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., pid=11, masterSystemTime=1732303320538 2024-11-22T19:22:00,586 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:00,586 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
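The "memstore type=CompactingMemStore ... compactor=ADAPTIVE" store lines above are the effect of the table-level 'hbase.hregion.compacting.memstore.type' attribute shown in the create statement earlier. As far as the public builder API goes, the same policy can also be requested per column family; the sketch below shows that variant as a presumed equivalent of the table-level key, not as what this test actually did:

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class AdaptiveMemStoreExample {
      // Requests ADAPTIVE in-memory compaction for one family; the region then backs
      // that family's store with a CompactingMemStore instead of the DefaultMemStore.
      static ColumnFamilyDescriptor adaptiveFamily(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
            .build();
      }
    }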
2024-11-22T19:22:00,588 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=686ebaaf5a8e3b2d28eef9abb3c2302e, regionState=OPEN, openSeqNum=2, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:22:00,595 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-22T19:22:00,595 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 in 207 msec 2024-11-22T19:22:00,599 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-22T19:22:00,599 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=686ebaaf5a8e3b2d28eef9abb3c2302e, ASSIGN in 373 msec 2024-11-22T19:22:00,600 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T19:22:00,601 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303320600"}]},"ts":"1732303320600"} 2024-11-22T19:22:00,604 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-22T19:22:00,608 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T19:22:00,611 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 482 msec 2024-11-22T19:22:00,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-22T19:22:00,768 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-22T19:22:00,774 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7fdf5682 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f6e36fe 2024-11-22T19:22:00,779 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e98ea32, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:00,787 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:00,790 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47204, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:00,794 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T19:22:00,796 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56720, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T19:22:00,806 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79d38d10 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f343a4d 2024-11-22T19:22:00,817 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12885408, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:00,818 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6c63ae4e to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22cb07dd 2024-11-22T19:22:00,827 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62c43377, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:00,829 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x736f1673 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@478bae6b 2024-11-22T19:22:00,833 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4977266, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:00,834 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4ee2166f to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5400112e 2024-11-22T19:22:00,841 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a8f4734, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:00,843 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3f34ff67 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@38766d64 2024-11-22T19:22:00,846 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18603bb9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:00,848 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4b5cad1a to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@295cb1ac 2024-11-22T19:22:00,855 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72e97e4b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:00,857 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c3b736e to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@70267494 2024-11-22T19:22:00,865 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@490457fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:00,867 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x767a8485 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1d2a8e08 2024-11-22T19:22:00,871 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c8de680, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:00,874 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6502d571 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c915d17 2024-11-22T19:22:00,884 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f6b07e3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:00,897 DEBUG [hconnection-0x42d5ecaf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:00,907 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47218, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:00,912 DEBUG [hconnection-0x2717408a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:00,916 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:22:00,921 DEBUG [hconnection-0x1956867-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:00,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-22T19:22:00,926 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:22:00,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 
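The "HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees" request and the FlushTableProcedure stored as pid=12 are the master-side view of a client flush call, which in the Java client is a single Admin method; roughly (a sketch, connection handling omitted):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class FlushExample {
      // Asks the master to flush the table; in this run that became a
      // FlushTableProcedure (pid=12) with a FlushRegionProcedure subtask (pid=13).
      static void flushTable(Admin admin) throws java.io.IOException {
        admin.flush(TableName.valueOf("TestAcidGuarantees"));
      }
    }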
2024-11-22T19:22:00,928 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:22:00,930 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:22:00,937 DEBUG [hconnection-0x82c8c4d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:00,940 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47228, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:00,942 DEBUG [hconnection-0x2216ae23-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:00,945 DEBUG [hconnection-0x3785b265-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:00,949 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47238, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:00,959 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47248, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:00,960 DEBUG [hconnection-0x3f6101e2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:00,963 DEBUG [hconnection-0x1417da6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:00,969 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47258, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:00,969 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47256, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:00,979 DEBUG [hconnection-0x19cc3c4a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:00,994 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47268, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:00,998 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47276, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:01,031 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47278, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:01,031 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T19:22:01,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-22T19:22:01,040 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:01,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:01,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:01,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:01,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:01,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:01,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:01,093 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:01,095 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-22T19:22:01,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:01,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:01,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:01,102 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:01,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:01,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:01,223 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/51f9d6864357482dbf0fdc53e6220187 is 50, key is test_row_0/A:col10/1732303321009/Put/seqid=0 2024-11-22T19:22:01,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-22T19:22:01,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741839_1015 (size=12001) 2024-11-22T19:22:01,284 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/51f9d6864357482dbf0fdc53e6220187 2024-11-22T19:22:01,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:01,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303381273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:01,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:01,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303381281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:01,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:01,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303381280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:01,300 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:01,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303381286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:01,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:01,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303381289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:01,304 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:01,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-22T19:22:01,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:01,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:01,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:01,321 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:01,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:01,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:01,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:01,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303381449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:01,452 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:01,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303381450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:01,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:01,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303381451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:01,455 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:01,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:01,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303381450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:01,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303381452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:01,461 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/264f8753a5ab44a8bdce804a619c54c1 is 50, key is test_row_0/B:col10/1732303321009/Put/seqid=0 2024-11-22T19:22:01,475 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:01,476 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-22T19:22:01,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:01,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:01,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:01,492 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:01,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:01,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:01,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741840_1016 (size=12001) 2024-11-22T19:22:01,505 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/264f8753a5ab44a8bdce804a619c54c1 2024-11-22T19:22:01,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-22T19:22:01,577 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/ce2be98c33e946e6b1fa17fbf3756864 is 50, key is test_row_0/C:col10/1732303321009/Put/seqid=0 2024-11-22T19:22:01,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741841_1017 (size=12001) 2024-11-22T19:22:01,608 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/ce2be98c33e946e6b1fa17fbf3756864 2024-11-22T19:22:01,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/51f9d6864357482dbf0fdc53e6220187 as 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/51f9d6864357482dbf0fdc53e6220187 2024-11-22T19:22:01,648 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:01,649 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-22T19:22:01,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/51f9d6864357482dbf0fdc53e6220187, entries=150, sequenceid=12, filesize=11.7 K 2024-11-22T19:22:01,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:01,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:01,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:01,655 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:01,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:01,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:01,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:01,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303381659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:01,663 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:01,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303381659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:01,665 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/264f8753a5ab44a8bdce804a619c54c1 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/264f8753a5ab44a8bdce804a619c54c1 2024-11-22T19:22:01,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:01,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303381660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:01,682 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/264f8753a5ab44a8bdce804a619c54c1, entries=150, sequenceid=12, filesize=11.7 K 2024-11-22T19:22:01,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/ce2be98c33e946e6b1fa17fbf3756864 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/ce2be98c33e946e6b1fa17fbf3756864 2024-11-22T19:22:01,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:01,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303381662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:01,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:01,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303381663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:01,711 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/ce2be98c33e946e6b1fa17fbf3756864, entries=150, sequenceid=12, filesize=11.7 K 2024-11-22T19:22:01,715 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 683ms, sequenceid=12, compaction requested=false 2024-11-22T19:22:01,716 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-22T19:22:01,718 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:01,813 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:01,814 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-22T19:22:01,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:01,814 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T19:22:01,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:01,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:01,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:01,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:01,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:01,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:01,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/e51942e0e6ca4a84b6d7040a6c886040 is 50, key is test_row_0/A:col10/1732303321278/Put/seqid=0 2024-11-22T19:22:01,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741842_1018 (size=12001) 2024-11-22T19:22:01,909 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/e51942e0e6ca4a84b6d7040a6c886040 2024-11-22T19:22:01,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/5c2fc7c78c5749949cc6ba32c7ec9ada is 50, key is test_row_0/B:col10/1732303321278/Put/seqid=0 2024-11-22T19:22:01,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
as already flushing 2024-11-22T19:22:01,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:02,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741843_1019 (size=12001) 2024-11-22T19:22:02,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:02,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303382005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:02,018 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/5c2fc7c78c5749949cc6ba32c7ec9ada 2024-11-22T19:22:02,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:02,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303382008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:02,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:02,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303382011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:02,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-22T19:22:02,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:02,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:02,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303382027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:02,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303382027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:02,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/74d30a8dd3924e85b7beb5378b20a163 is 50, key is test_row_0/C:col10/1732303321278/Put/seqid=0 2024-11-22T19:22:02,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741844_1020 (size=12001) 2024-11-22T19:22:02,093 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/74d30a8dd3924e85b7beb5378b20a163 2024-11-22T19:22:02,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/e51942e0e6ca4a84b6d7040a6c886040 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/e51942e0e6ca4a84b6d7040a6c886040 2024-11-22T19:22:02,128 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/e51942e0e6ca4a84b6d7040a6c886040, entries=150, sequenceid=37, filesize=11.7 K 2024-11-22T19:22:02,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:02,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303382126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:02,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/5c2fc7c78c5749949cc6ba32c7ec9ada as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/5c2fc7c78c5749949cc6ba32c7ec9ada 2024-11-22T19:22:02,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:02,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303382130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:02,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:02,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303382131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:02,148 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/5c2fc7c78c5749949cc6ba32c7ec9ada, entries=150, sequenceid=37, filesize=11.7 K 2024-11-22T19:22:02,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/74d30a8dd3924e85b7beb5378b20a163 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/74d30a8dd3924e85b7beb5378b20a163 2024-11-22T19:22:02,175 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/74d30a8dd3924e85b7beb5378b20a163, entries=150, sequenceid=37, filesize=11.7 K 2024-11-22T19:22:02,179 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 364ms, sequenceid=37, compaction requested=false 2024-11-22T19:22:02,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:02,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:02,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-22T19:22:02,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-22T19:22:02,188 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-22T19:22:02,188 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2530 sec 2024-11-22T19:22:02,193 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.2720 sec 2024-11-22T19:22:02,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:02,346 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-22T19:22:02,347 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:02,347 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:02,347 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:02,347 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:02,347 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:02,347 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:02,382 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/b26d8f3e9620497f88428d849f951667 is 50, key is test_row_0/A:col10/1732303322342/Put/seqid=0 2024-11-22T19:22:02,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741845_1021 (size=16681) 2024-11-22T19:22:02,459 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:02,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303382457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:02,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:02,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303382457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:02,465 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:02,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303382465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:02,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:02,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:02,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303382557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:02,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303382557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:02,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:02,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303382567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:02,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:02,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303382568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:02,574 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:02,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303382569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:02,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:02,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303382786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:02,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:02,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303382786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:02,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:02,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303382786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:02,823 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/b26d8f3e9620497f88428d849f951667 2024-11-22T19:22:02,825 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T19:22:02,826 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-22T19:22:02,859 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/fb24d566f81d46969786685cae1b5b9c is 50, key is test_row_0/B:col10/1732303322342/Put/seqid=0 2024-11-22T19:22:02,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741846_1022 (size=12001) 2024-11-22T19:22:02,941 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/fb24d566f81d46969786685cae1b5b9c 2024-11-22T19:22:02,973 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/32ee401a855e47c2ab9bb79bca56d04c is 50, key is test_row_0/C:col10/1732303322342/Put/seqid=0 2024-11-22T19:22:03,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741847_1023 (size=12001) 2024-11-22T19:22:03,025 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/32ee401a855e47c2ab9bb79bca56d04c 2024-11-22T19:22:03,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure 
is done pid=12 2024-11-22T19:22:03,056 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-22T19:22:03,061 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:22:03,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-22T19:22:03,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-22T19:22:03,068 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:22:03,070 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:22:03,070 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:22:03,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/b26d8f3e9620497f88428d849f951667 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/b26d8f3e9620497f88428d849f951667 2024-11-22T19:22:03,091 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/b26d8f3e9620497f88428d849f951667, entries=250, sequenceid=51, filesize=16.3 K 2024-11-22T19:22:03,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/fb24d566f81d46969786685cae1b5b9c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/fb24d566f81d46969786685cae1b5b9c 2024-11-22T19:22:03,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:03,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303383094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:03,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:03,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303383094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:03,108 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:03,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303383096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:03,114 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/fb24d566f81d46969786685cae1b5b9c, entries=150, sequenceid=51, filesize=11.7 K 2024-11-22T19:22:03,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/32ee401a855e47c2ab9bb79bca56d04c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/32ee401a855e47c2ab9bb79bca56d04c 2024-11-22T19:22:03,135 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/32ee401a855e47c2ab9bb79bca56d04c, entries=150, sequenceid=51, filesize=11.7 K 2024-11-22T19:22:03,137 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 791ms, sequenceid=51, compaction requested=true 2024-11-22T19:22:03,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:03,153 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:03,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:03,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
686ebaaf5a8e3b2d28eef9abb3c2302e:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:03,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:03,154 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:03,154 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:03,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:22:03,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:03,159 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:03,161 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/B is initiating minor compaction (all files) 2024-11-22T19:22:03,162 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/B in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
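The RegionTooBusyException rejections above come from HRegion.checkResources(): once a region's memstore passes its blocking limit (memstore flush size times hbase.hregion.memstore.block.multiplier, 512.0 K here), new mutations fail fast until the in-flight flush drains the memstore. A minimal configuration sketch that would yield the same 512 K limit; the concrete values are an assumption for illustration, not read from the test setup:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitConfig {
      // Illustrative values only: a 128 KB flush size with multiplier 4 gives the
      // 512 KB blocking limit reported in the RegionTooBusyException messages.
      public static Configuration smallMemstoreConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
      }
    }

If the default multiplier of 4 is in effect, the 512 K limit corresponds to a 128 K flush size; either way, the tiny limit is what makes flushes and write rejections alternate so quickly in this run.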
2024-11-22T19:22:03,162 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/264f8753a5ab44a8bdce804a619c54c1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/5c2fc7c78c5749949cc6ba32c7ec9ada, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/fb24d566f81d46969786685cae1b5b9c] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=35.2 K 2024-11-22T19:22:03,163 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 264f8753a5ab44a8bdce804a619c54c1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732303321009 2024-11-22T19:22:03,164 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40683 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:03,165 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c2fc7c78c5749949cc6ba32c7ec9ada, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732303321267 2024-11-22T19:22:03,165 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/A is initiating minor compaction (all files) 2024-11-22T19:22:03,165 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/A in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
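Both compactions being selected here are system-requested, queued by MemStoreFlusher.0 once the stores reached three eligible files; the same work can also be requested explicitly through the Admin API. A sketch assuming a standard HBase 2.x client; the table and column-family names are taken from the log, everything else is illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Queue a minor compaction of one column family, much as the flusher does here.
          admin.compact(table, Bytes.toBytes("A"));
          // Or rewrite all files in every store of the table.
          admin.majorCompact(table);
        }
      }
    }

Either call only enqueues the request; the region server's compaction threads pick it up the same way they pick up the flusher-requested work in this log.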
2024-11-22T19:22:03,166 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting fb24d566f81d46969786685cae1b5b9c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732303322005 2024-11-22T19:22:03,166 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/51f9d6864357482dbf0fdc53e6220187, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/e51942e0e6ca4a84b6d7040a6c886040, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/b26d8f3e9620497f88428d849f951667] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=39.7 K 2024-11-22T19:22:03,168 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51f9d6864357482dbf0fdc53e6220187, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732303321009 2024-11-22T19:22:03,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-22T19:22:03,176 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e51942e0e6ca4a84b6d7040a6c886040, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732303321267 2024-11-22T19:22:03,178 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b26d8f3e9620497f88428d849f951667, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732303321994 2024-11-22T19:22:03,226 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:03,227 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-22T19:22:03,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
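The flush executing as pid=15 is the region-level child of the FlushTableProcedure (pid=14) stored after the client's flush call logged above (Client=jenkins flush TestAcidGuarantees). A minimal sketch of that client-side request, assuming a standard HBase 2.x connection:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Submits the master-side FlushTableProcedure and waits for it (and its
          // per-region FlushRegionProcedure children) to finish.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

The blocking behaviour is consistent with the "Operation: FLUSH ... procId: 12 completed" future resolution seen for the previous flush.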
2024-11-22T19:22:03,227 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-22T19:22:03,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:03,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:03,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:03,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:03,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:03,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:03,243 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#A#compaction#10 average throughput is 0.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:03,249 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/c383e839e20c430bac38936c99c1a29b is 50, key is test_row_0/A:col10/1732303322342/Put/seqid=0 2024-11-22T19:22:03,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/5d289ca866e340a28aee534eb4dc2a42 is 50, key is test_row_0/A:col10/1732303322452/Put/seqid=0 2024-11-22T19:22:03,257 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#B#compaction#9 average throughput is 0.17 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:03,258 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/3cab44855ebf4504ad8651a3e9fb71e8 is 50, key is test_row_0/B:col10/1732303322342/Put/seqid=0 2024-11-22T19:22:03,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741848_1024 (size=12104) 2024-11-22T19:22:03,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741849_1025 (size=12001) 2024-11-22T19:22:03,338 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/5d289ca866e340a28aee534eb4dc2a42 2024-11-22T19:22:03,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741850_1026 (size=12104) 2024-11-22T19:22:03,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-22T19:22:03,384 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/3cab44855ebf4504ad8651a3e9fb71e8 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/3cab44855ebf4504ad8651a3e9fb71e8 2024-11-22T19:22:03,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/d261caab355c453cb64b8e264615d782 is 50, key is test_row_0/B:col10/1732303322452/Put/seqid=0 2024-11-22T19:22:03,411 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/B of 686ebaaf5a8e3b2d28eef9abb3c2302e into 3cab44855ebf4504ad8651a3e9fb71e8(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
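While this flush and compaction work proceeds, the writers keep hitting the 512 K blocking limit, so the RegionTooBusyException rejections continue below. The stock client retries these failures internally; the hand-rolled loop below is only meant to make the expected back-off-and-retry behaviour explicit (row, family and qualifier follow the test's test_row_0/A:col10 pattern, everything else is illustrative):

    import java.io.IOException;

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPut {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; attempt <= 10; attempt++) {
            try {
              table.put(put);
              break; // write accepted once the memstore has drained below the limit
            } catch (IOException e) {
              // Depending on client retry settings, the RegionTooBusyException may surface
              // directly or wrapped in a RetriesExhaustedException; either way the remedy
              // is the same: back off and try again.
              Thread.sleep(backoffMs);
              backoffMs = Math.min(backoffMs * 2, 5_000);
            }
          }
        }
      }
    }

This is effectively how the test's writer threads make progress: the pid=15 flush eventually brings the region's currentSize back under the limit (80.51 KB by the end of this excerpt) and the retried puts go through.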
2024-11-22T19:22:03,411 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:03,411 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/B, priority=13, startTime=1732303323154; duration=0sec 2024-11-22T19:22:03,412 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:03,412 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:B 2024-11-22T19:22:03,412 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:03,422 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:03,422 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/C is initiating minor compaction (all files) 2024-11-22T19:22:03,422 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/C in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:03,422 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/ce2be98c33e946e6b1fa17fbf3756864, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/74d30a8dd3924e85b7beb5378b20a163, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/32ee401a855e47c2ab9bb79bca56d04c] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=35.2 K 2024-11-22T19:22:03,423 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting ce2be98c33e946e6b1fa17fbf3756864, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732303321009 2024-11-22T19:22:03,424 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 74d30a8dd3924e85b7beb5378b20a163, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732303321267 2024-11-22T19:22:03,425 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 32ee401a855e47c2ab9bb79bca56d04c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732303322005 2024-11-22T19:22:03,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is 
added to blk_1073741851_1027 (size=12001) 2024-11-22T19:22:03,432 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/d261caab355c453cb64b8e264615d782 2024-11-22T19:22:03,472 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#C#compaction#13 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:03,474 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/25e7047a890f4cf5a8455cf1a660633e is 50, key is test_row_0/C:col10/1732303322342/Put/seqid=0 2024-11-22T19:22:03,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/2ccdee5e3b35401a947028c6449a2cc8 is 50, key is test_row_0/C:col10/1732303322452/Put/seqid=0 2024-11-22T19:22:03,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741852_1028 (size=12104) 2024-11-22T19:22:03,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741853_1029 (size=12001) 2024-11-22T19:22:03,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:03,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:03,639 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-22T19:22:03,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:03,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303383631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:03,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:03,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303383637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:03,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:03,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303383644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:03,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:03,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303383645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:03,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:03,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303383645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:03,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-22T19:22:03,732 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/c383e839e20c430bac38936c99c1a29b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/c383e839e20c430bac38936c99c1a29b 2024-11-22T19:22:03,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:03,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303383747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:03,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:03,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303383747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:03,752 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:03,752 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/A of 686ebaaf5a8e3b2d28eef9abb3c2302e into c383e839e20c430bac38936c99c1a29b(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:22:03,752 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:03,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303383749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:03,752 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/A, priority=13, startTime=1732303323139; duration=0sec 2024-11-22T19:22:03,753 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:03,753 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:A 2024-11-22T19:22:03,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:03,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303383750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:03,754 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:03,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303383750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:03,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:03,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303383954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:03,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:03,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303383954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:03,961 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:03,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303383959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:03,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:03,962 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:03,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303383959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:03,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303383959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:03,969 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/25e7047a890f4cf5a8455cf1a660633e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/25e7047a890f4cf5a8455cf1a660633e 2024-11-22T19:22:03,975 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/2ccdee5e3b35401a947028c6449a2cc8 2024-11-22T19:22:03,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/5d289ca866e340a28aee534eb4dc2a42 as 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/5d289ca866e340a28aee534eb4dc2a42 2024-11-22T19:22:03,995 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/C of 686ebaaf5a8e3b2d28eef9abb3c2302e into 25e7047a890f4cf5a8455cf1a660633e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:22:03,995 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:03,995 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/C, priority=13, startTime=1732303323154; duration=0sec 2024-11-22T19:22:03,995 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:03,995 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:C 2024-11-22T19:22:04,005 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/5d289ca866e340a28aee534eb4dc2a42, entries=150, sequenceid=74, filesize=11.7 K 2024-11-22T19:22:04,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/d261caab355c453cb64b8e264615d782 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d261caab355c453cb64b8e264615d782 2024-11-22T19:22:04,022 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d261caab355c453cb64b8e264615d782, entries=150, sequenceid=74, filesize=11.7 K 2024-11-22T19:22:04,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/2ccdee5e3b35401a947028c6449a2cc8 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/2ccdee5e3b35401a947028c6449a2cc8 2024-11-22T19:22:04,047 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/2ccdee5e3b35401a947028c6449a2cc8, entries=150, sequenceid=74, filesize=11.7 K 2024-11-22T19:22:04,050 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=80.51 KB/82440 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 823ms, sequenceid=74, compaction requested=false 2024-11-22T19:22:04,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:04,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:04,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-22T19:22:04,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-22T19:22:04,058 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-22T19:22:04,059 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 984 msec 2024-11-22T19:22:04,062 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 999 msec 2024-11-22T19:22:04,146 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T19:22:04,146 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-22T19:22:04,148 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-22T19:22:04,149 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-22T19:22:04,150 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T19:22:04,150 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-22T19:22:04,151 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-22T19:22:04,151 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-22T19:22:04,152 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-22T19:22:04,153 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-22T19:22:04,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-22T19:22:04,175 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-22T19:22:04,178 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:22:04,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-22T19:22:04,182 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:22:04,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-22T19:22:04,183 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:22:04,184 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:22:04,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:04,268 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-22T19:22:04,268 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:04,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:04,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:04,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:04,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:04,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:04,280 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): 
Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/3c5cde39fe14405eabe7eaa5a7971529 is 50, key is test_row_0/A:col10/1732303323623/Put/seqid=0 2024-11-22T19:22:04,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-22T19:22:04,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741854_1030 (size=12001) 2024-11-22T19:22:04,336 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:04,337 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-22T19:22:04,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:04,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:04,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:04,337 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:04,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:04,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:04,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:04,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303384345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:04,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:04,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303384346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:04,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:04,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303384347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:04,355 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:04,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303384347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:04,355 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:04,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303384350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:04,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:04,458 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:04,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303384455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:04,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303384456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:04,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:04,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303384457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:04,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:04,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303384457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:04,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:04,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303384458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:04,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-22T19:22:04,490 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:04,493 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-22T19:22:04,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:04,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:04,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:04,493 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:04,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:04,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:04,647 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:04,648 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-22T19:22:04,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:04,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:04,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:04,649 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:04,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:04,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:04,664 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:04,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303384663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:04,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:04,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303384663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:04,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:04,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303384664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:04,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:04,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303384665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:04,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:04,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303384666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:04,695 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/3c5cde39fe14405eabe7eaa5a7971529 2024-11-22T19:22:04,723 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/780d7e2ac5e34d768ddf59d7801e891c is 50, key is test_row_0/B:col10/1732303323623/Put/seqid=0 2024-11-22T19:22:04,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741855_1031 (size=12001) 2024-11-22T19:22:04,768 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/780d7e2ac5e34d768ddf59d7801e891c 2024-11-22T19:22:04,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-22T19:22:04,799 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/0c3ccd21adbf4ec88cf33b8bf8312b33 is 50, key is test_row_0/C:col10/1732303323623/Put/seqid=0 2024-11-22T19:22:04,803 DEBUG [RSProcedureDispatcher-pool-0 {}] 
master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:04,804 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-22T19:22:04,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:04,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:04,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:04,805 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:04,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:04,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:04,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741856_1032 (size=12001) 2024-11-22T19:22:04,838 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/0c3ccd21adbf4ec88cf33b8bf8312b33 2024-11-22T19:22:04,855 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/3c5cde39fe14405eabe7eaa5a7971529 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/3c5cde39fe14405eabe7eaa5a7971529 2024-11-22T19:22:04,868 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/3c5cde39fe14405eabe7eaa5a7971529, entries=150, sequenceid=94, filesize=11.7 K 2024-11-22T19:22:04,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/780d7e2ac5e34d768ddf59d7801e891c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/780d7e2ac5e34d768ddf59d7801e891c 2024-11-22T19:22:04,884 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/780d7e2ac5e34d768ddf59d7801e891c, entries=150, sequenceid=94, filesize=11.7 K 2024-11-22T19:22:04,887 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/0c3ccd21adbf4ec88cf33b8bf8312b33 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/0c3ccd21adbf4ec88cf33b8bf8312b33 2024-11-22T19:22:04,898 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/0c3ccd21adbf4ec88cf33b8bf8312b33, entries=150, sequenceid=94, filesize=11.7 K 2024-11-22T19:22:04,900 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 631ms, sequenceid=94, compaction requested=true 2024-11-22T19:22:04,900 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:04,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:A, priority=-2147483648, current under compaction 
store size is 1 2024-11-22T19:22:04,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:04,900 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:04,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:04,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:04,900 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:04,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:22:04,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:04,903 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:04,904 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/B is initiating minor compaction (all files) 2024-11-22T19:22:04,904 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/B in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:04,904 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/3cab44855ebf4504ad8651a3e9fb71e8, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d261caab355c453cb64b8e264615d782, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/780d7e2ac5e34d768ddf59d7801e891c] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=35.3 K 2024-11-22T19:22:04,904 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:04,904 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/A is initiating minor compaction (all files) 2024-11-22T19:22:04,904 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/A in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:04,905 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/c383e839e20c430bac38936c99c1a29b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/5d289ca866e340a28aee534eb4dc2a42, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/3c5cde39fe14405eabe7eaa5a7971529] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=35.3 K 2024-11-22T19:22:04,905 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 3cab44855ebf4504ad8651a3e9fb71e8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732303322005 2024-11-22T19:22:04,906 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting c383e839e20c430bac38936c99c1a29b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732303322005 2024-11-22T19:22:04,906 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting d261caab355c453cb64b8e264615d782, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1732303322441 2024-11-22T19:22:04,908 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d289ca866e340a28aee534eb4dc2a42, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1732303322441 2024-11-22T19:22:04,908 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 780d7e2ac5e34d768ddf59d7801e891c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732303323623 2024-11-22T19:22:04,909 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c5cde39fe14405eabe7eaa5a7971529, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732303323623 2024-11-22T19:22:04,930 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#A#compaction#18 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:04,931 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/f6a7fbf48aa245a9ad02eaef757f14cc is 50, key is test_row_0/A:col10/1732303323623/Put/seqid=0 2024-11-22T19:22:04,936 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#B#compaction#19 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:04,937 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/03535eb77cd74d20b5cc6aef2e3c630a is 50, key is test_row_0/B:col10/1732303323623/Put/seqid=0 2024-11-22T19:22:04,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741857_1033 (size=12207) 2024-11-22T19:22:04,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741858_1034 (size=12207) 2024-11-22T19:22:04,959 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:04,960 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-22T19:22:04,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:04,961 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-22T19:22:04,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:04,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:04,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:04,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:04,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:04,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:04,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/742a704aeb9746709733d1e9e0d5ef69 is 50, key is test_row_0/A:col10/1732303324344/Put/seqid=0 2024-11-22T19:22:04,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:04,977 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
as already flushing 2024-11-22T19:22:04,977 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/03535eb77cd74d20b5cc6aef2e3c630a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/03535eb77cd74d20b5cc6aef2e3c630a 2024-11-22T19:22:04,982 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/f6a7fbf48aa245a9ad02eaef757f14cc as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/f6a7fbf48aa245a9ad02eaef757f14cc 2024-11-22T19:22:04,996 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/B of 686ebaaf5a8e3b2d28eef9abb3c2302e into 03535eb77cd74d20b5cc6aef2e3c630a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:22:04,996 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:04,996 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/B, priority=13, startTime=1732303324900; duration=0sec 2024-11-22T19:22:04,996 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:04,996 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:B 2024-11-22T19:22:04,997 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:04,999 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:04,999 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/C is initiating minor compaction (all files) 2024-11-22T19:22:05,000 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/C in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:05,000 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/25e7047a890f4cf5a8455cf1a660633e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/2ccdee5e3b35401a947028c6449a2cc8, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/0c3ccd21adbf4ec88cf33b8bf8312b33] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=35.3 K 2024-11-22T19:22:05,001 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 25e7047a890f4cf5a8455cf1a660633e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732303322005 2024-11-22T19:22:05,002 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ccdee5e3b35401a947028c6449a2cc8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1732303322441 2024-11-22T19:22:05,003 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c3ccd21adbf4ec88cf33b8bf8312b33, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732303323623 2024-11-22T19:22:05,005 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/A of 686ebaaf5a8e3b2d28eef9abb3c2302e into f6a7fbf48aa245a9ad02eaef757f14cc(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:22:05,006 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:05,006 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/A, priority=13, startTime=1732303324900; duration=0sec 2024-11-22T19:22:05,006 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:05,006 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:A 2024-11-22T19:22:05,008 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303384997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303385000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303385005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,014 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303385008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303385010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741859_1035 (size=12001) 2024-11-22T19:22:05,019 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/742a704aeb9746709733d1e9e0d5ef69 2024-11-22T19:22:05,030 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#C#compaction#21 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:05,031 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/aa63b5cbc50342ec95c65320139703f1 is 50, key is test_row_0/C:col10/1732303323623/Put/seqid=0 2024-11-22T19:22:05,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/2b457d32c99f4c5fbd6fcc52cb1635e5 is 50, key is test_row_0/B:col10/1732303324344/Put/seqid=0 2024-11-22T19:22:05,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741860_1036 (size=12207) 2024-11-22T19:22:05,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741861_1037 (size=12001) 2024-11-22T19:22:05,054 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/2b457d32c99f4c5fbd6fcc52cb1635e5 2024-11-22T19:22:05,063 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/aa63b5cbc50342ec95c65320139703f1 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/aa63b5cbc50342ec95c65320139703f1 2024-11-22T19:22:05,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/34985edc155c416c95c4d95fe4b7f599 is 50, key is test_row_0/C:col10/1732303324344/Put/seqid=0 2024-11-22T19:22:05,085 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/C of 686ebaaf5a8e3b2d28eef9abb3c2302e into aa63b5cbc50342ec95c65320139703f1(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:05,086 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:05,086 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/C, priority=13, startTime=1732303324900; duration=0sec 2024-11-22T19:22:05,086 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:05,086 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:C 2024-11-22T19:22:05,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741862_1038 (size=12001) 2024-11-22T19:22:05,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303385112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303385112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303385116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303385117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303385119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-22T19:22:05,321 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303385317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303385317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303385321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303385322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303385324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,504 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/34985edc155c416c95c4d95fe4b7f599 2024-11-22T19:22:05,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/742a704aeb9746709733d1e9e0d5ef69 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/742a704aeb9746709733d1e9e0d5ef69 2024-11-22T19:22:05,533 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/742a704aeb9746709733d1e9e0d5ef69, entries=150, sequenceid=113, filesize=11.7 K 2024-11-22T19:22:05,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/2b457d32c99f4c5fbd6fcc52cb1635e5 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/2b457d32c99f4c5fbd6fcc52cb1635e5 2024-11-22T19:22:05,572 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/2b457d32c99f4c5fbd6fcc52cb1635e5, entries=150, sequenceid=113, filesize=11.7 K 2024-11-22T19:22:05,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/34985edc155c416c95c4d95fe4b7f599 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/34985edc155c416c95c4d95fe4b7f599 2024-11-22T19:22:05,590 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/34985edc155c416c95c4d95fe4b7f599, entries=150, sequenceid=113, filesize=11.7 K 2024-11-22T19:22:05,592 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 631ms, sequenceid=113, compaction requested=false 2024-11-22T19:22:05,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:05,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:05,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-22T19:22:05,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-22T19:22:05,599 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-22T19:22:05,599 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4110 sec 2024-11-22T19:22:05,601 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.4210 sec 2024-11-22T19:22:05,629 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-22T19:22:05,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:05,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:05,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:05,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:05,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:05,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
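[Editor's note] The RegionTooBusyException entries surrounding this point all report "Over memstore limit=512.0 K". In HBase that blocking threshold is the per-region memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; once the region's memstore passes it, HRegion.checkResources (the frame at HRegion.java:5067 in the traces) rejects further mutations until the in-flight flush drains the memstore. The sketch below is illustrative only: the configuration keys are standard HBase settings, but the concrete values are assumptions chosen to reproduce a 512 K limit, not values read from this TestAcidGuarantees run.

// Illustrative only: one way a 512.0 K blocking limit like the one in this log can arise.
// The numeric values are assumptions, NOT taken from the test configuration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore flush threshold (default 128 MB; tests often shrink it).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);   // 128 KB (assumed)
    // Writes are blocked once the memstore reaches flush.size * block.multiplier.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // default multiplier
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking limit = " + blockingLimit + " bytes"); // 524288 = 512.0 K
  }
}

Under these assumed settings the limit works out to 128 KB x 4 = 512 K, matching the exception text above; the blocked puts succeed again once the DefaultStoreFlusher output later in the log shows the flush completing.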
2024-11-22T19:22:05,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:05,642 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/ee56c44646ec47e587d59d32c8317433 is 50, key is test_row_0/A:col10/1732303325626/Put/seqid=0 2024-11-22T19:22:05,654 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303385648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303385649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303385651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303385652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303385654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741863_1039 (size=12151) 2024-11-22T19:22:05,759 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303385757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,760 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,760 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303385757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303385758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303385758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303385758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303385963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303385963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303385963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303385967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:05,971 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:05,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303385969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:06,084 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/ee56c44646ec47e587d59d32c8317433 2024-11-22T19:22:06,103 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/e7d41d805f084902b5d767389bf1f7ca is 50, key is test_row_0/B:col10/1732303325626/Put/seqid=0 2024-11-22T19:22:06,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741864_1040 (size=12151) 2024-11-22T19:22:06,269 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:06,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303386268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:06,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:06,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:06,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303386271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:06,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303386271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:06,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:06,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303386274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:06,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:06,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303386274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:06,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-22T19:22:06,291 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-22T19:22:06,294 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:22:06,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-22T19:22:06,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-22T19:22:06,299 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:22:06,300 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:22:06,300 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:22:06,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure 
is done pid=18 2024-11-22T19:22:06,453 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:06,453 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-22T19:22:06,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:06,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:06,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:06,454 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:06,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:06,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
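[Editor's note] The FLUSH table procedure in this stretch (pid=18 with subprocedure pid=19, after pid=16/17 completed earlier) is driven by a client-side admin flush request ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"); the region server answers "NOT flushing ... as already flushing", so the master keeps re-dispatching the FlushRegionCallable until it can run. A minimal, hedged sketch of issuing such a flush from a client follows; the table name is taken from the log, while the connection setup and error handling are assumptions.

// Minimal sketch of requesting the table flush seen in this log (Operation: FLUSH).
// Admin.flush(TableName) is the standard HBase client API; everything else here is illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a FlushTableProcedure on the master and waits for it, which is when the
      // client side logs "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: N completed".
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}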
2024-11-22T19:22:06,513 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/e7d41d805f084902b5d767389bf1f7ca 2024-11-22T19:22:06,531 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/0acb4e7893774a508eb13dedba3d38e2 is 50, key is test_row_0/C:col10/1732303325626/Put/seqid=0 2024-11-22T19:22:06,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741865_1041 (size=12151) 2024-11-22T19:22:06,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-22T19:22:06,608 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:06,608 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-22T19:22:06,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:06,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:06,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:06,609 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:06,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:06,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:06,763 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:06,763 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-22T19:22:06,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:06,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:06,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:06,764 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:06,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:06,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:06,774 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:06,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303386773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:06,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:06,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303386772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:06,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:06,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303386775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:06,782 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:06,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303386781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:06,785 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:06,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303386784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:06,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-22T19:22:06,918 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:06,919 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-22T19:22:06,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:06,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:06,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:06,919 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:06,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:06,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:06,943 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/0acb4e7893774a508eb13dedba3d38e2 2024-11-22T19:22:06,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/ee56c44646ec47e587d59d32c8317433 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/ee56c44646ec47e587d59d32c8317433 2024-11-22T19:22:06,967 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/ee56c44646ec47e587d59d32c8317433, entries=150, sequenceid=137, filesize=11.9 K 2024-11-22T19:22:06,969 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/e7d41d805f084902b5d767389bf1f7ca as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/e7d41d805f084902b5d767389bf1f7ca 2024-11-22T19:22:06,977 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/e7d41d805f084902b5d767389bf1f7ca, entries=150, 
sequenceid=137, filesize=11.9 K 2024-11-22T19:22:06,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/0acb4e7893774a508eb13dedba3d38e2 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/0acb4e7893774a508eb13dedba3d38e2 2024-11-22T19:22:06,991 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/0acb4e7893774a508eb13dedba3d38e2, entries=150, sequenceid=137, filesize=11.9 K 2024-11-22T19:22:06,992 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 1363ms, sequenceid=137, compaction requested=true 2024-11-22T19:22:06,993 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:06,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:06,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:06,993 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:06,993 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:06,995 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:06,995 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/B is initiating minor compaction (all files) 2024-11-22T19:22:06,995 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/B in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:06,995 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/03535eb77cd74d20b5cc6aef2e3c630a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/2b457d32c99f4c5fbd6fcc52cb1635e5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/e7d41d805f084902b5d767389bf1f7ca] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=35.5 K 2024-11-22T19:22:06,996 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:06,996 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/A is initiating minor compaction (all files) 2024-11-22T19:22:06,996 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/A in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:06,996 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/f6a7fbf48aa245a9ad02eaef757f14cc, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/742a704aeb9746709733d1e9e0d5ef69, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/ee56c44646ec47e587d59d32c8317433] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=35.5 K 2024-11-22T19:22:06,997 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 03535eb77cd74d20b5cc6aef2e3c630a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732303323623 2024-11-22T19:22:06,997 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f6a7fbf48aa245a9ad02eaef757f14cc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732303323623 2024-11-22T19:22:06,998 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b457d32c99f4c5fbd6fcc52cb1635e5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1732303324344 2024-11-22T19:22:06,998 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 742a704aeb9746709733d1e9e0d5ef69, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1732303324344 2024-11-22T19:22:06,998 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting ee56c44646ec47e587d59d32c8317433, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732303325003 2024-11-22T19:22:06,999 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting e7d41d805f084902b5d767389bf1f7ca, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732303325003 2024-11-22T19:22:06,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:07,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:07,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:22:07,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:07,023 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#B#compaction#27 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:07,024 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/5419d037e70345588de36a0d6d3afd13 is 50, key is test_row_0/B:col10/1732303325626/Put/seqid=0 2024-11-22T19:22:07,030 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#A#compaction#28 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:07,031 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/db768bd5280e4d0082557f6a6ec7c2cf is 50, key is test_row_0/A:col10/1732303325626/Put/seqid=0 2024-11-22T19:22:07,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741867_1043 (size=12459) 2024-11-22T19:22:07,068 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/db768bd5280e4d0082557f6a6ec7c2cf as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/db768bd5280e4d0082557f6a6ec7c2cf 2024-11-22T19:22:07,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741866_1042 (size=12459) 2024-11-22T19:22:07,073 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:07,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-22T19:22:07,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:07,075 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-22T19:22:07,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:07,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:07,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:07,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:07,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:07,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:07,081 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/5419d037e70345588de36a0d6d3afd13 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/5419d037e70345588de36a0d6d3afd13 2024-11-22T19:22:07,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/ebefe97f22b744cdb221c7a7cacd8f0a is 50, key is test_row_0/A:col10/1732303325638/Put/seqid=0 2024-11-22T19:22:07,093 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/B of 686ebaaf5a8e3b2d28eef9abb3c2302e into 5419d037e70345588de36a0d6d3afd13(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:07,094 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:07,094 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/B, priority=13, startTime=1732303326993; duration=0sec 2024-11-22T19:22:07,095 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:07,095 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:B 2024-11-22T19:22:07,096 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:07,099 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:07,099 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/C is initiating minor compaction (all files) 2024-11-22T19:22:07,099 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/C in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:07,099 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/aa63b5cbc50342ec95c65320139703f1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/34985edc155c416c95c4d95fe4b7f599, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/0acb4e7893774a508eb13dedba3d38e2] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=35.5 K 2024-11-22T19:22:07,100 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/A of 686ebaaf5a8e3b2d28eef9abb3c2302e into db768bd5280e4d0082557f6a6ec7c2cf(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:07,100 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:07,100 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/A, priority=13, startTime=1732303326993; duration=0sec 2024-11-22T19:22:07,100 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:07,100 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:A 2024-11-22T19:22:07,101 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting aa63b5cbc50342ec95c65320139703f1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732303323623 2024-11-22T19:22:07,101 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 34985edc155c416c95c4d95fe4b7f599, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1732303324344 2024-11-22T19:22:07,102 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 0acb4e7893774a508eb13dedba3d38e2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732303325003 2024-11-22T19:22:07,127 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#C#compaction#30 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:07,128 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/813e114eada04a65999f6dfedde05c44 is 50, key is test_row_0/C:col10/1732303325626/Put/seqid=0 2024-11-22T19:22:07,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741868_1044 (size=12151) 2024-11-22T19:22:07,132 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/ebefe97f22b744cdb221c7a7cacd8f0a 2024-11-22T19:22:07,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/604b53015461438697085951c6cd5bba is 50, key is test_row_0/B:col10/1732303325638/Put/seqid=0 2024-11-22T19:22:07,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741869_1045 (size=12459) 2024-11-22T19:22:07,177 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/813e114eada04a65999f6dfedde05c44 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/813e114eada04a65999f6dfedde05c44 2024-11-22T19:22:07,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741870_1046 (size=12151) 2024-11-22T19:22:07,187 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/604b53015461438697085951c6cd5bba 2024-11-22T19:22:07,188 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/C of 686ebaaf5a8e3b2d28eef9abb3c2302e into 813e114eada04a65999f6dfedde05c44(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:07,188 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:07,188 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/C, priority=13, startTime=1732303327001; duration=0sec 2024-11-22T19:22:07,188 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:07,188 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:C 2024-11-22T19:22:07,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/344ce40fe0bf48f3bb6b9b56d29e0f27 is 50, key is test_row_0/C:col10/1732303325638/Put/seqid=0 2024-11-22T19:22:07,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741871_1047 (size=12151) 2024-11-22T19:22:07,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-22T19:22:07,615 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/344ce40fe0bf48f3bb6b9b56d29e0f27 2024-11-22T19:22:07,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/ebefe97f22b744cdb221c7a7cacd8f0a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/ebefe97f22b744cdb221c7a7cacd8f0a 2024-11-22T19:22:07,640 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/ebefe97f22b744cdb221c7a7cacd8f0a, entries=150, sequenceid=152, filesize=11.9 K 2024-11-22T19:22:07,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/604b53015461438697085951c6cd5bba as 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/604b53015461438697085951c6cd5bba 2024-11-22T19:22:07,649 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/604b53015461438697085951c6cd5bba, entries=150, sequenceid=152, filesize=11.9 K 2024-11-22T19:22:07,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/344ce40fe0bf48f3bb6b9b56d29e0f27 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/344ce40fe0bf48f3bb6b9b56d29e0f27 2024-11-22T19:22:07,659 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/344ce40fe0bf48f3bb6b9b56d29e0f27, entries=150, sequenceid=152, filesize=11.9 K 2024-11-22T19:22:07,660 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=0 B/0 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 585ms, sequenceid=152, compaction requested=false 2024-11-22T19:22:07,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:07,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:07,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-22T19:22:07,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-22T19:22:07,665 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-22T19:22:07,665 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3630 sec 2024-11-22T19:22:07,672 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.3760 sec 2024-11-22T19:22:07,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:07,797 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T19:22:07,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:07,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:07,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:07,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:07,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:07,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:07,808 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/9d729f718e8c403f9c42dd9a84b7719c is 50, key is test_row_0/A:col10/1732303327789/Put/seqid=0 2024-11-22T19:22:07,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741872_1048 (size=14541) 2024-11-22T19:22:07,820 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/9d729f718e8c403f9c42dd9a84b7719c 2024-11-22T19:22:07,835 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/10d26633a36240ceb1d3e8115e874a51 is 50, key is test_row_0/B:col10/1732303327789/Put/seqid=0 2024-11-22T19:22:07,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741873_1049 
(size=12151) 2024-11-22T19:22:07,859 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/10d26633a36240ceb1d3e8115e874a51 2024-11-22T19:22:07,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:07,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303387857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:07,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:07,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303387861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:07,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:07,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303387862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:07,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:07,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303387862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:07,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:07,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303387865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:07,895 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/b9c03510318443ecb7d8d4637f3368f3 is 50, key is test_row_0/C:col10/1732303327789/Put/seqid=0 2024-11-22T19:22:07,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741874_1050 (size=12151) 2024-11-22T19:22:07,979 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:07,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303387976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:07,980 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:07,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303387977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:07,980 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:07,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303387977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:07,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:07,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303387978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:07,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:07,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303387979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:08,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:08,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303388183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:08,185 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:08,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303388184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:08,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:08,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303388184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:08,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:08,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303388184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:08,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:08,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303388184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:08,309 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/b9c03510318443ecb7d8d4637f3368f3 2024-11-22T19:22:08,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/9d729f718e8c403f9c42dd9a84b7719c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/9d729f718e8c403f9c42dd9a84b7719c 2024-11-22T19:22:08,345 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/9d729f718e8c403f9c42dd9a84b7719c, entries=200, sequenceid=166, filesize=14.2 K 2024-11-22T19:22:08,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/10d26633a36240ceb1d3e8115e874a51 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/10d26633a36240ceb1d3e8115e874a51 2024-11-22T19:22:08,359 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/10d26633a36240ceb1d3e8115e874a51, entries=150, sequenceid=166, filesize=11.9 K 2024-11-22T19:22:08,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/b9c03510318443ecb7d8d4637f3368f3 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/b9c03510318443ecb7d8d4637f3368f3 2024-11-22T19:22:08,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/b9c03510318443ecb7d8d4637f3368f3, entries=150, sequenceid=166, filesize=11.9 K 2024-11-22T19:22:08,377 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 580ms, sequenceid=166, compaction requested=true 2024-11-22T19:22:08,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:08,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:08,377 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:08,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:08,378 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:08,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:08,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:08,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:22:08,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:08,381 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39151 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:08,381 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:08,381 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/A is initiating minor compaction (all files) 2024-11-22T19:22:08,381 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/B is initiating minor compaction (all files) 2024-11-22T19:22:08,381 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/A in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:08,381 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/B in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:08,382 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/db768bd5280e4d0082557f6a6ec7c2cf, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/ebefe97f22b744cdb221c7a7cacd8f0a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/9d729f718e8c403f9c42dd9a84b7719c] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=38.2 K 2024-11-22T19:22:08,382 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/5419d037e70345588de36a0d6d3afd13, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/604b53015461438697085951c6cd5bba, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/10d26633a36240ceb1d3e8115e874a51] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=35.9 K 2024-11-22T19:22:08,383 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 5419d037e70345588de36a0d6d3afd13, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732303325003 2024-11-22T19:22:08,383 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting db768bd5280e4d0082557f6a6ec7c2cf, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732303325003 2024-11-22T19:22:08,383 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting ebefe97f22b744cdb221c7a7cacd8f0a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732303325638 2024-11-22T19:22:08,384 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): 
Compacting 604b53015461438697085951c6cd5bba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732303325638 2024-11-22T19:22:08,385 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9d729f718e8c403f9c42dd9a84b7719c, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1732303327788 2024-11-22T19:22:08,386 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 10d26633a36240ceb1d3e8115e874a51, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1732303327788 2024-11-22T19:22:08,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-22T19:22:08,408 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-22T19:22:08,411 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:22:08,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-22T19:22:08,413 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:22:08,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-22T19:22:08,414 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:22:08,415 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:22:08,442 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#A#compaction#36 average throughput is 0.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:08,443 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/6e403b48054c49dba713c862d28b1305 is 50, key is test_row_0/A:col10/1732303327789/Put/seqid=0 2024-11-22T19:22:08,445 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#B#compaction#37 average throughput is 0.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:08,446 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/ebe28ca992bc422ca016e42b22774933 is 50, key is test_row_0/B:col10/1732303327789/Put/seqid=0 2024-11-22T19:22:08,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741875_1051 (size=12561) 2024-11-22T19:22:08,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741876_1052 (size=12561) 2024-11-22T19:22:08,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:08,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-22T19:22:08,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:08,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:08,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:08,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:08,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:08,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:08,506 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/6e403b48054c49dba713c862d28b1305 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/6e403b48054c49dba713c862d28b1305 2024-11-22T19:22:08,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-22T19:22:08,520 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/c74fec59a6614e84a50189803e322f01 is 50, key is test_row_0/A:col10/1732303327858/Put/seqid=0 2024-11-22T19:22:08,523 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:08,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303388521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:08,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:08,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303388522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:08,525 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:08,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303388523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:08,529 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:08,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303388524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:08,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:08,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303388524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:08,530 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/ebe28ca992bc422ca016e42b22774933 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/ebe28ca992bc422ca016e42b22774933 2024-11-22T19:22:08,534 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/A of 686ebaaf5a8e3b2d28eef9abb3c2302e into 6e403b48054c49dba713c862d28b1305(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:08,534 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:08,534 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/A, priority=13, startTime=1732303328377; duration=0sec 2024-11-22T19:22:08,534 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:08,534 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:A 2024-11-22T19:22:08,534 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:08,538 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:08,538 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/C is initiating minor compaction (all files) 2024-11-22T19:22:08,538 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/C in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:08,539 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/813e114eada04a65999f6dfedde05c44, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/344ce40fe0bf48f3bb6b9b56d29e0f27, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/b9c03510318443ecb7d8d4637f3368f3] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=35.9 K 2024-11-22T19:22:08,540 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 813e114eada04a65999f6dfedde05c44, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732303325003 2024-11-22T19:22:08,540 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 344ce40fe0bf48f3bb6b9b56d29e0f27, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732303325638 2024-11-22T19:22:08,541 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b9c03510318443ecb7d8d4637f3368f3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1732303327788 2024-11-22T19:22:08,547 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed 
compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/B of 686ebaaf5a8e3b2d28eef9abb3c2302e into ebe28ca992bc422ca016e42b22774933(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:22:08,547 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:08,547 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/B, priority=13, startTime=1732303328378; duration=0sec 2024-11-22T19:22:08,548 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:08,548 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:B 2024-11-22T19:22:08,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741877_1053 (size=12151) 2024-11-22T19:22:08,565 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/c74fec59a6614e84a50189803e322f01 2024-11-22T19:22:08,568 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:08,569 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-22T19:22:08,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:08,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:08,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:08,570 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:08,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:08,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:08,586 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#C#compaction#39 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:08,587 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/af215b74a31640848f618417ed8a4fad is 50, key is test_row_0/C:col10/1732303327789/Put/seqid=0 2024-11-22T19:22:08,591 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/9f70c166a1cc4ef7a9bb34fa9208b152 is 50, key is test_row_0/B:col10/1732303327858/Put/seqid=0 2024-11-22T19:22:08,626 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:08,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303388626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:08,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:08,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303388627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:08,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:08,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303388628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:08,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:08,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303388631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:08,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:08,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303388632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:08,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741878_1054 (size=12561) 2024-11-22T19:22:08,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741879_1055 (size=12151) 2024-11-22T19:22:08,675 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/9f70c166a1cc4ef7a9bb34fa9208b152 2024-11-22T19:22:08,698 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/af215b74a31640848f618417ed8a4fad as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/af215b74a31640848f618417ed8a4fad 2024-11-22T19:22:08,709 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/C of 686ebaaf5a8e3b2d28eef9abb3c2302e into af215b74a31640848f618417ed8a4fad(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:08,709 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:08,709 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/C, priority=13, startTime=1732303328380; duration=0sec 2024-11-22T19:22:08,710 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:08,710 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:C 2024-11-22T19:22:08,710 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/864d0380133f4f4d935aeda7dc7702c5 is 50, key is test_row_0/C:col10/1732303327858/Put/seqid=0 2024-11-22T19:22:08,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-22T19:22:08,725 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:08,726 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-22T19:22:08,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:08,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741880_1056 (size=12151) 2024-11-22T19:22:08,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:08,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:08,727 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:08,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:08,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:08,734 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/864d0380133f4f4d935aeda7dc7702c5 2024-11-22T19:22:08,756 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/c74fec59a6614e84a50189803e322f01 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/c74fec59a6614e84a50189803e322f01 2024-11-22T19:22:08,768 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/c74fec59a6614e84a50189803e322f01, entries=150, sequenceid=194, filesize=11.9 K 2024-11-22T19:22:08,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/9f70c166a1cc4ef7a9bb34fa9208b152 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/9f70c166a1cc4ef7a9bb34fa9208b152 2024-11-22T19:22:08,782 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/9f70c166a1cc4ef7a9bb34fa9208b152, entries=150, 
sequenceid=194, filesize=11.9 K 2024-11-22T19:22:08,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/864d0380133f4f4d935aeda7dc7702c5 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/864d0380133f4f4d935aeda7dc7702c5 2024-11-22T19:22:08,803 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/864d0380133f4f4d935aeda7dc7702c5, entries=150, sequenceid=194, filesize=11.9 K 2024-11-22T19:22:08,804 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 304ms, sequenceid=194, compaction requested=false 2024-11-22T19:22:08,804 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:08,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:08,846 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T19:22:08,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:08,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:08,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:08,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:08,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:08,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:08,855 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/3b6aca3e60a44bc6a45bc3686779e356 is 50, key is test_row_0/A:col10/1732303328844/Put/seqid=0 2024-11-22T19:22:08,883 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:08,884 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-22T19:22:08,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:08,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:08,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:08,884 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:08,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:08,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:08,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741881_1057 (size=14541) 2024-11-22T19:22:08,893 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/3b6aca3e60a44bc6a45bc3686779e356 2024-11-22T19:22:08,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:08,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303388897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:08,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:08,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303388901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:08,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:08,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303388901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:08,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:08,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303388903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:08,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:08,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303388904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:08,911 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/a6c7f94d9f604f458e8ff7249ebd1d52 is 50, key is test_row_0/B:col10/1732303328844/Put/seqid=0 2024-11-22T19:22:08,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741882_1058 (size=12151) 2024-11-22T19:22:08,933 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/a6c7f94d9f604f458e8ff7249ebd1d52 2024-11-22T19:22:08,970 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/145586c3e9674daaba1b4a9d99527d1b is 50, key is test_row_0/C:col10/1732303328844/Put/seqid=0 2024-11-22T19:22:09,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741883_1059 (size=12151) 2024-11-22T19:22:09,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303389007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303389009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,015 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303389010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303389011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,017 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303389011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-22T19:22:09,038 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:09,039 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-22T19:22:09,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:09,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:09,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:09,040 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:09,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:09,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:09,194 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:09,195 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-22T19:22:09,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:09,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:09,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:09,195 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:09,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:09,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:09,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303389215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303389219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,223 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303389221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303389227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303389247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,349 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:09,351 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-22T19:22:09,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:09,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:09,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:09,351 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:09,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:09,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:09,412 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/145586c3e9674daaba1b4a9d99527d1b 2024-11-22T19:22:09,439 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/3b6aca3e60a44bc6a45bc3686779e356 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/3b6aca3e60a44bc6a45bc3686779e356 2024-11-22T19:22:09,452 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/3b6aca3e60a44bc6a45bc3686779e356, entries=200, sequenceid=210, filesize=14.2 K 2024-11-22T19:22:09,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/a6c7f94d9f604f458e8ff7249ebd1d52 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/a6c7f94d9f604f458e8ff7249ebd1d52 2024-11-22T19:22:09,480 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/a6c7f94d9f604f458e8ff7249ebd1d52, entries=150, 
sequenceid=210, filesize=11.9 K 2024-11-22T19:22:09,485 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/145586c3e9674daaba1b4a9d99527d1b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/145586c3e9674daaba1b4a9d99527d1b 2024-11-22T19:22:09,503 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/145586c3e9674daaba1b4a9d99527d1b, entries=150, sequenceid=210, filesize=11.9 K 2024-11-22T19:22:09,507 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 661ms, sequenceid=210, compaction requested=true 2024-11-22T19:22:09,507 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:09,508 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:09,508 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:09,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:09,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:09,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:09,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:09,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:22:09,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T19:22:09,510 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:09,510 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-22T19:22:09,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:09,511 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-22T19:22:09,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:09,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:09,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:09,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:09,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:09,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:09,514 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39253 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:09,514 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/A is initiating minor compaction (all files) 2024-11-22T19:22:09,514 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/A in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:09,514 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/6e403b48054c49dba713c862d28b1305, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/c74fec59a6614e84a50189803e322f01, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/3b6aca3e60a44bc6a45bc3686779e356] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=38.3 K 2024-11-22T19:22:09,516 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:09,516 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/B is initiating minor compaction (all files) 2024-11-22T19:22:09,516 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/B in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:09,517 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/ebe28ca992bc422ca016e42b22774933, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/9f70c166a1cc4ef7a9bb34fa9208b152, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/a6c7f94d9f604f458e8ff7249ebd1d52] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=36.0 K 2024-11-22T19:22:09,517 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e403b48054c49dba713c862d28b1305, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1732303327788 2024-11-22T19:22:09,518 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting ebe28ca992bc422ca016e42b22774933, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1732303327788 2024-11-22T19:22:09,518 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting c74fec59a6614e84a50189803e322f01, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732303327858 2024-11-22T19:22:09,519 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f70c166a1cc4ef7a9bb34fa9208b152, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732303327858 2024-11-22T19:22:09,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-22T19:22:09,519 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b6aca3e60a44bc6a45bc3686779e356, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732303328518 2024-11-22T19:22:09,520 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting a6c7f94d9f604f458e8ff7249ebd1d52, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732303328842 2024-11-22T19:22:09,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/52d2351088ac495eb10fbe8200dac861 is 50, key is test_row_0/A:col10/1732303328877/Put/seqid=0 2024-11-22T19:22:09,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:09,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:09,554 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#B#compaction#46 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:09,555 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/4276f4ce807b410991af3913ea81d761 is 50, key is test_row_0/B:col10/1732303328844/Put/seqid=0 2024-11-22T19:22:09,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303389559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303389560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,567 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,567 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#A#compaction#47 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:09,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303389562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,568 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/7a8d077566c844d2b5e65cd29c6481d2 is 50, key is test_row_0/A:col10/1732303328844/Put/seqid=0 2024-11-22T19:22:09,574 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303389566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303389566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741884_1060 (size=12151) 2024-11-22T19:22:09,610 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/52d2351088ac495eb10fbe8200dac861 2024-11-22T19:22:09,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741885_1061 (size=12663) 2024-11-22T19:22:09,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741886_1062 (size=12663) 2024-11-22T19:22:09,630 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/4276f4ce807b410991af3913ea81d761 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/4276f4ce807b410991af3913ea81d761 2024-11-22T19:22:09,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/4056dd1fb96449dc81dfa118684e5e7a is 50, key is test_row_0/B:col10/1732303328877/Put/seqid=0 2024-11-22T19:22:09,642 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/7a8d077566c844d2b5e65cd29c6481d2 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/7a8d077566c844d2b5e65cd29c6481d2 2024-11-22T19:22:09,645 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/B of 686ebaaf5a8e3b2d28eef9abb3c2302e 
into 4276f4ce807b410991af3913ea81d761(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:22:09,645 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:09,645 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/B, priority=13, startTime=1732303329509; duration=0sec 2024-11-22T19:22:09,645 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:09,645 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:B 2024-11-22T19:22:09,645 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:09,648 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:09,648 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/C is initiating minor compaction (all files) 2024-11-22T19:22:09,648 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/C in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:09,649 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/af215b74a31640848f618417ed8a4fad, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/864d0380133f4f4d935aeda7dc7702c5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/145586c3e9674daaba1b4a9d99527d1b] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=36.0 K 2024-11-22T19:22:09,649 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting af215b74a31640848f618417ed8a4fad, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1732303327788 2024-11-22T19:22:09,658 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 864d0380133f4f4d935aeda7dc7702c5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732303327858 2024-11-22T19:22:09,661 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 145586c3e9674daaba1b4a9d99527d1b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732303328842 2024-11-22T19:22:09,666 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/A of 686ebaaf5a8e3b2d28eef9abb3c2302e into 7a8d077566c844d2b5e65cd29c6481d2(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:22:09,667 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:09,667 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/A, priority=13, startTime=1732303329508; duration=0sec 2024-11-22T19:22:09,667 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:09,667 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:A 2024-11-22T19:22:09,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741887_1063 (size=12151) 2024-11-22T19:22:09,679 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303389673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303389672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303389677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303389682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,689 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/4056dd1fb96449dc81dfa118684e5e7a 2024-11-22T19:22:09,694 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#C#compaction#49 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:09,700 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/2a2a2fd8861945739033bdbbf4e0ff21 is 50, key is test_row_0/C:col10/1732303328844/Put/seqid=0 2024-11-22T19:22:09,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303389688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/e63c69e1ce914c4aa76d337eaf864ece is 50, key is test_row_0/C:col10/1732303328877/Put/seqid=0 2024-11-22T19:22:09,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741888_1064 (size=12663) 2024-11-22T19:22:09,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741889_1065 (size=12151) 2024-11-22T19:22:09,805 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/e63c69e1ce914c4aa76d337eaf864ece 2024-11-22T19:22:09,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/52d2351088ac495eb10fbe8200dac861 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/52d2351088ac495eb10fbe8200dac861 2024-11-22T19:22:09,856 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/52d2351088ac495eb10fbe8200dac861, entries=150, sequenceid=233, filesize=11.9 K 2024-11-22T19:22:09,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/4056dd1fb96449dc81dfa118684e5e7a as 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/4056dd1fb96449dc81dfa118684e5e7a 2024-11-22T19:22:09,871 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/4056dd1fb96449dc81dfa118684e5e7a, entries=150, sequenceid=233, filesize=11.9 K 2024-11-22T19:22:09,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/e63c69e1ce914c4aa76d337eaf864ece as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/e63c69e1ce914c4aa76d337eaf864ece 2024-11-22T19:22:09,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303389882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303389892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,897 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/e63c69e1ce914c4aa76d337eaf864ece, entries=150, sequenceid=233, filesize=11.9 K 2024-11-22T19:22:09,899 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 388ms, sequenceid=233, compaction requested=false 2024-11-22T19:22:09,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:09,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:09,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-22T19:22:09,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-22T19:22:09,904 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-22T19:22:09,904 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4880 sec 2024-11-22T19:22:09,906 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.4940 sec 2024-11-22T19:22:09,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:09,915 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-22T19:22:09,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:09,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:09,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:09,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:09,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:09,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:09,940 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/b72dac42c02f4b1d8082a37bebc21fd7 is 50, key is test_row_0/A:col10/1732303329910/Put/seqid=0 2024-11-22T19:22:09,989 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303389979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,990 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303389982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:09,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:09,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303389978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:10,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741890_1066 (size=14541) 2024-11-22T19:22:10,013 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/b72dac42c02f4b1d8082a37bebc21fd7 2024-11-22T19:22:10,043 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/4be20bfb0b1d497b9106b1d7252fa021 is 50, key is test_row_0/B:col10/1732303329910/Put/seqid=0 2024-11-22T19:22:10,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741891_1067 (size=12151) 2024-11-22T19:22:10,088 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/4be20bfb0b1d497b9106b1d7252fa021 2024-11-22T19:22:10,099 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:10,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303390092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:10,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:10,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303390093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:10,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:10,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303390093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:10,134 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/877dc1cef811436386ce43fdae8af642 is 50, key is test_row_0/C:col10/1732303329910/Put/seqid=0 2024-11-22T19:22:10,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741892_1068 (size=12151) 2024-11-22T19:22:10,195 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:10,195 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/2a2a2fd8861945739033bdbbf4e0ff21 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/2a2a2fd8861945739033bdbbf4e0ff21 2024-11-22T19:22:10,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303390194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:10,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:10,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303390202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:10,211 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/C of 686ebaaf5a8e3b2d28eef9abb3c2302e into 2a2a2fd8861945739033bdbbf4e0ff21(size=12.4 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:22:10,212 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:10,212 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/C, priority=13, startTime=1732303329509; duration=0sec 2024-11-22T19:22:10,212 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:10,212 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:C 2024-11-22T19:22:10,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:10,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303390303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:10,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:10,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:10,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303390304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:10,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303390304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:10,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-22T19:22:10,521 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-22T19:22:10,527 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:22:10,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-22T19:22:10,531 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:22:10,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-22T19:22:10,532 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:22:10,532 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:22:10,591 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/877dc1cef811436386ce43fdae8af642 2024-11-22T19:22:10,603 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/b72dac42c02f4b1d8082a37bebc21fd7 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/b72dac42c02f4b1d8082a37bebc21fd7 2024-11-22T19:22:10,609 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/b72dac42c02f4b1d8082a37bebc21fd7, entries=200, sequenceid=252, filesize=14.2 K 2024-11-22T19:22:10,612 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/4be20bfb0b1d497b9106b1d7252fa021 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/4be20bfb0b1d497b9106b1d7252fa021 2024-11-22T19:22:10,619 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:10,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303390608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:10,620 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/4be20bfb0b1d497b9106b1d7252fa021, entries=150, sequenceid=252, filesize=11.9 K 2024-11-22T19:22:10,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/877dc1cef811436386ce43fdae8af642 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/877dc1cef811436386ce43fdae8af642 2024-11-22T19:22:10,627 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:10,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303390618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:10,627 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:10,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303390612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:10,631 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/877dc1cef811436386ce43fdae8af642, entries=150, sequenceid=252, filesize=11.9 K 2024-11-22T19:22:10,632 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 717ms, sequenceid=252, compaction requested=true 2024-11-22T19:22:10,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:10,632 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:10,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-22T19:22:10,634 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39355 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:10,634 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/A is initiating minor compaction (all files) 2024-11-22T19:22:10,634 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/A in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
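The RegionTooBusyException entries above show the region server rejecting Mutate calls while the region's memstore sits above its 512.0 K blocking limit (derived from the configured flush size and the hbase.hregion.memstore.block.multiplier setting; the exact values this test uses are not visible in the log). Below is a minimal, illustrative client-side sketch of retrying such rejected puts with backoff. The table, row, family, and qualifier names come from the log; the retry parameters, and the assumption that the exception surfaces directly to the caller instead of being retried inside the HBase client first, are assumptions for illustration only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Row/family/qualifier mirror the "test_row_0/A:col10" keys seen in the log.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                    // illustrative backoff schedule
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);                  // may be rejected while the memstore is over the blocking limit
                    break;
                } catch (RegionTooBusyException busy) {
                    Thread.sleep(backoffMs);         // wait for the flusher to drain the memstore, then retry
                    backoffMs *= 2;
                }
            }
        }
    }
}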
2024-11-22T19:22:10,634 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/7a8d077566c844d2b5e65cd29c6481d2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/52d2351088ac495eb10fbe8200dac861, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/b72dac42c02f4b1d8082a37bebc21fd7] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=38.4 K 2024-11-22T19:22:10,635 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a8d077566c844d2b5e65cd29c6481d2, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732303328842 2024-11-22T19:22:10,636 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52d2351088ac495eb10fbe8200dac861, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732303328877 2024-11-22T19:22:10,636 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b72dac42c02f4b1d8082a37bebc21fd7, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732303329562 2024-11-22T19:22:10,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:10,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:10,650 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:10,651 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:10,651 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/B is initiating minor compaction (all files) 2024-11-22T19:22:10,651 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/B in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:10,652 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/4276f4ce807b410991af3913ea81d761, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/4056dd1fb96449dc81dfa118684e5e7a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/4be20bfb0b1d497b9106b1d7252fa021] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=36.1 K 2024-11-22T19:22:10,652 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 4276f4ce807b410991af3913ea81d761, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732303328842 2024-11-22T19:22:10,653 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 4056dd1fb96449dc81dfa118684e5e7a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732303328877 2024-11-22T19:22:10,653 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 4be20bfb0b1d497b9106b1d7252fa021, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732303329562 2024-11-22T19:22:10,662 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#A#compaction#54 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:10,662 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:10,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:10,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:22:10,663 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/f19576bde23f4d889f5e84320b1c370c is 50, key is test_row_0/A:col10/1732303329910/Put/seqid=0 2024-11-22T19:22:10,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:10,674 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#B#compaction#55 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:10,675 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/71994200674a48658ad970f8390f64f2 is 50, key is test_row_0/B:col10/1732303329910/Put/seqid=0 2024-11-22T19:22:10,684 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:10,686 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-22T19:22:10,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:10,687 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-22T19:22:10,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:10,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:10,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:10,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:10,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:10,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:10,707 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
as already flushing 2024-11-22T19:22:10,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:10,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/6de95a2223bc41448650aa286d8fa93e is 50, key is test_row_0/A:col10/1732303329979/Put/seqid=0 2024-11-22T19:22:10,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741893_1069 (size=12765) 2024-11-22T19:22:10,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741894_1070 (size=12765) 2024-11-22T19:22:10,751 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/f19576bde23f4d889f5e84320b1c370c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/f19576bde23f4d889f5e84320b1c370c 2024-11-22T19:22:10,756 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/71994200674a48658ad970f8390f64f2 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/71994200674a48658ad970f8390f64f2 2024-11-22T19:22:10,768 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/A of 686ebaaf5a8e3b2d28eef9abb3c2302e into f19576bde23f4d889f5e84320b1c370c(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:10,768 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:10,768 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/A, priority=13, startTime=1732303330632; duration=0sec 2024-11-22T19:22:10,768 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:10,768 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:A 2024-11-22T19:22:10,768 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:10,768 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/B of 686ebaaf5a8e3b2d28eef9abb3c2302e into 71994200674a48658ad970f8390f64f2(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:22:10,768 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:10,768 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/B, priority=13, startTime=1732303330650; duration=0sec 2024-11-22T19:22:10,769 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:10,769 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:B 2024-11-22T19:22:10,770 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:10,771 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/C is initiating minor compaction (all files) 2024-11-22T19:22:10,771 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/C in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:10,771 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/2a2a2fd8861945739033bdbbf4e0ff21, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/e63c69e1ce914c4aa76d337eaf864ece, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/877dc1cef811436386ce43fdae8af642] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=36.1 K 2024-11-22T19:22:10,771 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a2a2fd8861945739033bdbbf4e0ff21, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732303328842 2024-11-22T19:22:10,772 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e63c69e1ce914c4aa76d337eaf864ece, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732303328877 2024-11-22T19:22:10,772 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 877dc1cef811436386ce43fdae8af642, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732303329562 2024-11-22T19:22:10,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741895_1071 (size=12301) 2024-11-22T19:22:10,778 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/6de95a2223bc41448650aa286d8fa93e 2024-11-22T19:22:10,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:10,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303390799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:10,804 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:10,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303390799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:10,810 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#C#compaction#57 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:10,811 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/b3fe6a255d0d406bbabbca4d351b4190 is 50, key is test_row_0/C:col10/1732303329910/Put/seqid=0 2024-11-22T19:22:10,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/d37ca8d91c364bd3901d114700c30fe5 is 50, key is test_row_0/B:col10/1732303329979/Put/seqid=0 2024-11-22T19:22:10,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-22T19:22:10,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741896_1072 (size=12765) 2024-11-22T19:22:10,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741897_1073 (size=12301) 2024-11-22T19:22:10,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:10,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303390906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:10,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:10,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303390907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:11,122 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:11,123 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:11,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303391118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:11,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303391117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:11,128 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:11,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303391125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:11,132 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:11,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303391129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:11,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-22T19:22:11,135 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:11,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303391130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:11,285 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/b3fe6a255d0d406bbabbca4d351b4190 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/b3fe6a255d0d406bbabbca4d351b4190 2024-11-22T19:22:11,298 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/d37ca8d91c364bd3901d114700c30fe5 2024-11-22T19:22:11,309 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/C of 686ebaaf5a8e3b2d28eef9abb3c2302e into b3fe6a255d0d406bbabbca4d351b4190(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:11,309 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:11,309 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/C, priority=13, startTime=1732303330663; duration=0sec 2024-11-22T19:22:11,309 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:11,310 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:C 2024-11-22T19:22:11,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/e9f49ff83d6c43ca979084d3c9d2df40 is 50, key is test_row_0/C:col10/1732303329979/Put/seqid=0 2024-11-22T19:22:11,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741898_1074 (size=12301) 2024-11-22T19:22:11,353 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/e9f49ff83d6c43ca979084d3c9d2df40 2024-11-22T19:22:11,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/6de95a2223bc41448650aa286d8fa93e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/6de95a2223bc41448650aa286d8fa93e 2024-11-22T19:22:11,382 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/6de95a2223bc41448650aa286d8fa93e, entries=150, sequenceid=272, filesize=12.0 K 2024-11-22T19:22:11,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/d37ca8d91c364bd3901d114700c30fe5 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d37ca8d91c364bd3901d114700c30fe5 2024-11-22T19:22:11,409 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d37ca8d91c364bd3901d114700c30fe5, entries=150, sequenceid=272, filesize=12.0 K 2024-11-22T19:22:11,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/e9f49ff83d6c43ca979084d3c9d2df40 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/e9f49ff83d6c43ca979084d3c9d2df40 2024-11-22T19:22:11,421 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/e9f49ff83d6c43ca979084d3c9d2df40, entries=150, sequenceid=272, filesize=12.0 K 2024-11-22T19:22:11,426 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 740ms, sequenceid=272, compaction requested=false 2024-11-22T19:22:11,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:11,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:11,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-22T19:22:11,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-22T19:22:11,435 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-22T19:22:11,435 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 900 msec 2024-11-22T19:22:11,437 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 909 msec 2024-11-22T19:22:11,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:11,441 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-22T19:22:11,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:11,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:11,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:11,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:11,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:11,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:11,462 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/39b9a003981d4a839bdde58bc5c030b4 is 50, key is test_row_0/A:col10/1732303330796/Put/seqid=0 2024-11-22T19:22:11,505 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:11,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303391501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:11,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:11,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303391503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:11,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741899_1075 (size=12301) 2024-11-22T19:22:11,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:11,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303391607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:11,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:11,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303391613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:11,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-22T19:22:11,637 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-22T19:22:11,639 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:22:11,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-22T19:22:11,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-22T19:22:11,642 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:22:11,643 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:22:11,643 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:22:11,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-22T19:22:11,796 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:11,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T19:22:11,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
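The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" and FlushTableProcedure (pid=24) entries above correspond to a client-initiated table flush. A minimal sketch of that client call is shown below; the table name comes from the log, while the connection setup is an assumption mirroring the earlier retry sketch. In this build the master runs the request as a FlushTableProcedure with per-region subprocedures, which is what the pid=24/pid=25 lines show.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table and waits for completion,
            // matching the "Operation: FLUSH, Table Name: default:TestAcidGuarantees" line above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}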
2024-11-22T19:22:11,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:11,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:11,797 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:11,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:11,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:11,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:11,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303391813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:11,819 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:11,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303391817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:11,915 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/39b9a003981d4a839bdde58bc5c030b4 2024-11-22T19:22:11,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-22T19:22:11,951 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:11,952 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T19:22:11,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:11,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:11,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:11,952 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:11,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:11,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:11,965 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/cce343efc3cd4d9eba3383105838df4b is 50, key is test_row_0/B:col10/1732303330796/Put/seqid=0 2024-11-22T19:22:12,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741900_1076 (size=12301) 2024-11-22T19:22:12,015 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/cce343efc3cd4d9eba3383105838df4b 2024-11-22T19:22:12,034 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/6283aef5c7f54cf0ae1d721052bb6595 is 50, key is test_row_0/C:col10/1732303330796/Put/seqid=0 2024-11-22T19:22:12,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741901_1077 (size=12301) 2024-11-22T19:22:12,065 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/6283aef5c7f54cf0ae1d721052bb6595 2024-11-22T19:22:12,079 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/39b9a003981d4a839bdde58bc5c030b4 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/39b9a003981d4a839bdde58bc5c030b4 2024-11-22T19:22:12,089 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/39b9a003981d4a839bdde58bc5c030b4, entries=150, sequenceid=295, filesize=12.0 K 2024-11-22T19:22:12,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/cce343efc3cd4d9eba3383105838df4b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/cce343efc3cd4d9eba3383105838df4b 2024-11-22T19:22:12,106 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/cce343efc3cd4d9eba3383105838df4b, entries=150, sequenceid=295, filesize=12.0 K 2024-11-22T19:22:12,107 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/6283aef5c7f54cf0ae1d721052bb6595 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/6283aef5c7f54cf0ae1d721052bb6595 2024-11-22T19:22:12,119 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:12,120 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T19:22:12,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:12,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:12,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:12,120 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:12,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:12,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:12,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:12,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303392124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:12,126 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/6283aef5c7f54cf0ae1d721052bb6595, entries=150, sequenceid=295, filesize=12.0 K 2024-11-22T19:22:12,128 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 688ms, sequenceid=295, compaction requested=true 2024-11-22T19:22:12,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:12,129 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:12,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:12,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:12,131 DEBUG 
[RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:12,131 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:12,131 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/A is initiating minor compaction (all files) 2024-11-22T19:22:12,131 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/A in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:12,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:12,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:12,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:22:12,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:12,131 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/f19576bde23f4d889f5e84320b1c370c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/6de95a2223bc41448650aa286d8fa93e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/39b9a003981d4a839bdde58bc5c030b4] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=36.5 K 2024-11-22T19:22:12,132 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f19576bde23f4d889f5e84320b1c370c, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732303329562 2024-11-22T19:22:12,132 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6de95a2223bc41448650aa286d8fa93e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1732303329963 2024-11-22T19:22:12,133 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:12,133 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/B is initiating minor compaction (all files) 
2024-11-22T19:22:12,133 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/B in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:12,133 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/71994200674a48658ad970f8390f64f2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d37ca8d91c364bd3901d114700c30fe5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/cce343efc3cd4d9eba3383105838df4b] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=36.5 K 2024-11-22T19:22:12,133 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39b9a003981d4a839bdde58bc5c030b4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732303330791 2024-11-22T19:22:12,134 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 71994200674a48658ad970f8390f64f2, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732303329562 2024-11-22T19:22:12,136 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting d37ca8d91c364bd3901d114700c30fe5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1732303329963 2024-11-22T19:22:12,137 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting cce343efc3cd4d9eba3383105838df4b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732303330791 2024-11-22T19:22:12,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:12,149 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-22T19:22:12,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:12,153 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:12,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:12,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:12,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:12,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:12,163 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
686ebaaf5a8e3b2d28eef9abb3c2302e#A#compaction#63 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:12,164 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/f9a8e636756e4a0a909d11968063cc77 is 50, key is test_row_0/A:col10/1732303330796/Put/seqid=0 2024-11-22T19:22:12,165 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#B#compaction#64 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:12,166 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/17cdc321934947a1ab8e6e65d03a0658 is 50, key is test_row_0/B:col10/1732303330796/Put/seqid=0 2024-11-22T19:22:12,170 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/97e142d716dd475598af946c557c922d is 50, key is test_row_0/A:col10/1732303332142/Put/seqid=0 2024-11-22T19:22:12,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:12,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:12,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303392196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:12,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303392198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:12,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:12,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303392201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:12,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:12,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303392202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:12,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741903_1079 (size=13017) 2024-11-22T19:22:12,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741902_1078 (size=13017) 2024-11-22T19:22:12,226 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/17cdc321934947a1ab8e6e65d03a0658 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/17cdc321934947a1ab8e6e65d03a0658 2024-11-22T19:22:12,227 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/f9a8e636756e4a0a909d11968063cc77 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/f9a8e636756e4a0a909d11968063cc77 2024-11-22T19:22:12,240 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/A of 686ebaaf5a8e3b2d28eef9abb3c2302e into f9a8e636756e4a0a909d11968063cc77(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:22:12,240 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/B of 686ebaaf5a8e3b2d28eef9abb3c2302e into 17cdc321934947a1ab8e6e65d03a0658(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:12,241 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:12,241 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:12,241 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/B, priority=13, startTime=1732303332130; duration=0sec 2024-11-22T19:22:12,241 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/A, priority=13, startTime=1732303332129; duration=0sec 2024-11-22T19:22:12,241 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:12,241 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:B 2024-11-22T19:22:12,241 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:12,241 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:A 2024-11-22T19:22:12,241 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:12,243 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:12,243 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/C is initiating minor compaction (all files) 2024-11-22T19:22:12,243 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/C in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:12,243 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/b3fe6a255d0d406bbabbca4d351b4190, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/e9f49ff83d6c43ca979084d3c9d2df40, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/6283aef5c7f54cf0ae1d721052bb6595] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=36.5 K 2024-11-22T19:22:12,244 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting b3fe6a255d0d406bbabbca4d351b4190, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732303329562 2024-11-22T19:22:12,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-22T19:22:12,245 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting e9f49ff83d6c43ca979084d3c9d2df40, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1732303329963 2024-11-22T19:22:12,246 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 6283aef5c7f54cf0ae1d721052bb6595, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732303330791 2024-11-22T19:22:12,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741904_1080 (size=14741) 2024-11-22T19:22:12,254 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/97e142d716dd475598af946c557c922d 2024-11-22T19:22:12,274 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:12,275 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#C#compaction#66 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:12,276 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/7ad6f2652b0d445b9a39a302490cb199 is 50, key is test_row_0/C:col10/1732303330796/Put/seqid=0 2024-11-22T19:22:12,280 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T19:22:12,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:12,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:12,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:12,280 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:12,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:12,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:12,286 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/1cedb4d1b2fd4acc82685a3714d688b4 is 50, key is test_row_0/B:col10/1732303332142/Put/seqid=0 2024-11-22T19:22:12,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741905_1081 (size=13017) 2024-11-22T19:22:12,312 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:12,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303392306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:12,314 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:12,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303392308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:12,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:12,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303392306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:12,321 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/7ad6f2652b0d445b9a39a302490cb199 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/7ad6f2652b0d445b9a39a302490cb199 2024-11-22T19:22:12,333 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/C of 686ebaaf5a8e3b2d28eef9abb3c2302e into 7ad6f2652b0d445b9a39a302490cb199(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:22:12,333 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:12,333 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/C, priority=13, startTime=1732303332131; duration=0sec 2024-11-22T19:22:12,333 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:12,333 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:C 2024-11-22T19:22:12,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:12,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303392307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:12,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741906_1082 (size=12301) 2024-11-22T19:22:12,352 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/1cedb4d1b2fd4acc82685a3714d688b4 2024-11-22T19:22:12,378 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/b6c4ea8112fd4fedb6dfd74f3bf45567 is 50, key is test_row_0/C:col10/1732303332142/Put/seqid=0 2024-11-22T19:22:12,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741907_1083 (size=12301) 2024-11-22T19:22:12,429 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/b6c4ea8112fd4fedb6dfd74f3bf45567 2024-11-22T19:22:12,434 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:12,434 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T19:22:12,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:12,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
as already flushing 2024-11-22T19:22:12,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:12,435 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:12,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:12,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:12,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/97e142d716dd475598af946c557c922d as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/97e142d716dd475598af946c557c922d 2024-11-22T19:22:12,455 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/97e142d716dd475598af946c557c922d, entries=200, sequenceid=314, filesize=14.4 K 2024-11-22T19:22:12,457 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/1cedb4d1b2fd4acc82685a3714d688b4 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/1cedb4d1b2fd4acc82685a3714d688b4 2024-11-22T19:22:12,467 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/1cedb4d1b2fd4acc82685a3714d688b4, entries=150, sequenceid=314, filesize=12.0 K 2024-11-22T19:22:12,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/b6c4ea8112fd4fedb6dfd74f3bf45567 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/b6c4ea8112fd4fedb6dfd74f3bf45567 2024-11-22T19:22:12,475 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/b6c4ea8112fd4fedb6dfd74f3bf45567, entries=150, sequenceid=314, filesize=12.0 K 2024-11-22T19:22:12,476 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 327ms, sequenceid=314, compaction requested=false 2024-11-22T19:22:12,477 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:12,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:12,523 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-22T19:22:12,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:12,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:12,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:12,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:12,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:12,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:12,542 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/0f61ca35413445bdabd1e3cf5a6929c6 is 50, key is test_row_0/A:col10/1732303332517/Put/seqid=0 2024-11-22T19:22:12,573 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:12,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303392561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:12,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741908_1084 (size=14741) 2024-11-22T19:22:12,576 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:12,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303392569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:12,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:12,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303392570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:12,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:12,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303392570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:12,582 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/0f61ca35413445bdabd1e3cf5a6929c6 2024-11-22T19:22:12,588 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:12,589 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T19:22:12,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:12,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:12,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:12,589 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:12,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:12,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:12,606 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/ec006918df36498ea733cc1d93e81a86 is 50, key is test_row_0/B:col10/1732303332517/Put/seqid=0 2024-11-22T19:22:12,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:12,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303392631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:12,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741909_1085 (size=12301) 2024-11-22T19:22:12,662 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/ec006918df36498ea733cc1d93e81a86 2024-11-22T19:22:12,684 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:12,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303392675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:12,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:12,684 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:12,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303392678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:12,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303392679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:12,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:12,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303392678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:12,690 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/13889243d1e746cfb9d3ef86c5b2c691 is 50, key is test_row_0/C:col10/1732303332517/Put/seqid=0 2024-11-22T19:22:12,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741910_1086 (size=12301) 2024-11-22T19:22:12,744 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:12,744 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/13889243d1e746cfb9d3ef86c5b2c691 2024-11-22T19:22:12,745 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T19:22:12,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:12,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:12,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:12,745 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:12,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:12,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:12,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-22T19:22:12,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/0f61ca35413445bdabd1e3cf5a6929c6 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/0f61ca35413445bdabd1e3cf5a6929c6 2024-11-22T19:22:12,758 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/0f61ca35413445bdabd1e3cf5a6929c6, entries=200, sequenceid=336, filesize=14.4 K 2024-11-22T19:22:12,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/ec006918df36498ea733cc1d93e81a86 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/ec006918df36498ea733cc1d93e81a86 2024-11-22T19:22:12,766 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/ec006918df36498ea733cc1d93e81a86, entries=150, sequenceid=336, filesize=12.0 K 2024-11-22T19:22:12,767 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/13889243d1e746cfb9d3ef86c5b2c691 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/13889243d1e746cfb9d3ef86c5b2c691 2024-11-22T19:22:12,781 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/13889243d1e746cfb9d3ef86c5b2c691, entries=150, sequenceid=336, filesize=12.0 K 2024-11-22T19:22:12,784 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 261ms, sequenceid=336, compaction requested=true 2024-11-22T19:22:12,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:12,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:12,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:12,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:12,785 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:12,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:12,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:22:12,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T19:22:12,785 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:12,788 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42499 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:12,788 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/A is initiating minor compaction (all files) 2024-11-22T19:22:12,788 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/A in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
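The RegionTooBusyException warnings earlier in this section ("Over memstore limit=512.0 K") come from HRegion.checkResources rejecting writes while the region's memstore is above its blocking limit; once the flush above completes and drains the memstore, writes are accepted again. The stock HBase client normally retries this exception on its own (subject to hbase.client.retries.number), so the explicit loop below is only a sketch to make that behavior visible; the table, row, and column names are taken from this log and the backoff values are illustrative.

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class PutWithBackoff {
      // Keep retrying a put that the server rejects with RegionTooBusyException
      // while the memstore is over its blocking limit and a flush is draining it.
      static void putWithBackoff(Connection conn, byte[] value) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
          long backoffMs = 100;
          while (true) {
            try {
              table.put(put);
              return;
            } catch (RegionTooBusyException e) {
              Thread.sleep(backoffMs);                      // wait for the flush to catch up
              backoffMs = Math.min(backoffMs * 2, 5_000);   // capped exponential backoff
            }
          }
        }
      }
    }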
2024-11-22T19:22:12,788 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/f9a8e636756e4a0a909d11968063cc77, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/97e142d716dd475598af946c557c922d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/0f61ca35413445bdabd1e3cf5a6929c6] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=41.5 K 2024-11-22T19:22:12,789 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f9a8e636756e4a0a909d11968063cc77, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732303330791 2024-11-22T19:22:12,790 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:12,790 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97e142d716dd475598af946c557c922d, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732303331499 2024-11-22T19:22:12,790 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/B is initiating minor compaction (all files) 2024-11-22T19:22:12,790 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/B in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
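Note on the ExploringCompactionPolicy entries above: three store files totalling 42499 bytes (about 41.5 K) were selected "with 1 in ratio". A candidate set is commonly treated as in ratio when no single file is larger than the combined size of the other files times the configured ratio. The following is a simplified Java sketch of that test under those assumptions, not the actual HBase policy code, and the byte sizes and ratio value are assumed for illustration:

import java.util.List;

public final class RatioCheck {
    // A set of adjacent store files is "in ratio" when every file is no larger than the
    // sum of the other files multiplied by the ratio (simplified view of the selection test).
    static boolean inRatio(List<Long> fileSizesBytes, double ratio) {
        long total = fileSizesBytes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizesBytes) {
            if (size > (total - size) * ratio) {
                return false; // one file dominates the candidate set
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Assumed byte sizes roughly matching the three A-store files above (sum = 42499).
        System.out.println(inRatio(List.of(13_000L, 14_750L, 14_749L), 1.2)); // true
    }
}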
2024-11-22T19:22:12,790 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/17cdc321934947a1ab8e6e65d03a0658, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/1cedb4d1b2fd4acc82685a3714d688b4, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/ec006918df36498ea733cc1d93e81a86] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=36.7 K 2024-11-22T19:22:12,791 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f61ca35413445bdabd1e3cf5a6929c6, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1732303332193 2024-11-22T19:22:12,791 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 17cdc321934947a1ab8e6e65d03a0658, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732303330791 2024-11-22T19:22:12,792 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 1cedb4d1b2fd4acc82685a3714d688b4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732303331499 2024-11-22T19:22:12,792 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting ec006918df36498ea733cc1d93e81a86, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1732303332194 2024-11-22T19:22:12,812 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#A#compaction#72 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:12,815 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#B#compaction#73 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:12,816 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/6caf296b355045d0953ce9744c8b46d4 is 50, key is test_row_0/A:col10/1732303332517/Put/seqid=0 2024-11-22T19:22:12,817 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/8190bf0cebea4b96aa8a0b640e403b20 is 50, key is test_row_0/B:col10/1732303332517/Put/seqid=0 2024-11-22T19:22:12,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741911_1087 (size=13119) 2024-11-22T19:22:12,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741912_1088 (size=13119) 2024-11-22T19:22:12,892 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/6caf296b355045d0953ce9744c8b46d4 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/6caf296b355045d0953ce9744c8b46d4 2024-11-22T19:22:12,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:12,898 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-22T19:22:12,898 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:12,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:12,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:12,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:12,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:12,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:12,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:12,899 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T19:22:12,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
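Note on the PressureAwareThroughputController entries above: compaction output averaged a few MB/second against a 50.00 MB/second limit, so no sleeps were needed. A simplified throttling sketch under that assumption, not the HBase controller itself: after each chunk it sleeps only long enough to keep the running average under the limit.

public final class ThroughputLimiter {
    private final double limitBytesPerSec;
    private final long startNanos = System.nanoTime();
    private long bytesWritten;

    ThroughputLimiter(double limitBytesPerSec) {
        this.limitBytesPerSec = limitBytesPerSec;
    }

    // Called after each chunk of output; sleeps only if the running average would
    // otherwise exceed the limit (the log above shows 0 sleeps because a few MB/s
    // is well under the 50 MB/s limit).
    void onWrite(long bytes) throws InterruptedException {
        bytesWritten += bytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double minSec = bytesWritten / limitBytesPerSec;
        long sleepMs = (long) ((minSec - elapsedSec) * 1000);
        if (sleepMs > 0) {
            Thread.sleep(sleepMs);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ThroughputLimiter limiter = new ThroughputLimiter(50.0 * 1024 * 1024); // 50 MB/s cap
        for (int i = 0; i < 10; i++) {
            limiter.onWrite(1024 * 1024); // pretend 1 MB of output was just written
        }
    }
}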
2024-11-22T19:22:12,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:12,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:12,899 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:12,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:12,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:12,903 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/A of 686ebaaf5a8e3b2d28eef9abb3c2302e into 6caf296b355045d0953ce9744c8b46d4(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:12,903 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:12,903 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/A, priority=13, startTime=1732303332785; duration=0sec 2024-11-22T19:22:12,903 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:12,903 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:A 2024-11-22T19:22:12,903 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:12,905 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:12,905 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/C is initiating minor compaction (all files) 2024-11-22T19:22:12,905 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/C in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:12,905 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/7ad6f2652b0d445b9a39a302490cb199, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/b6c4ea8112fd4fedb6dfd74f3bf45567, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/13889243d1e746cfb9d3ef86c5b2c691] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=36.7 K 2024-11-22T19:22:12,906 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7ad6f2652b0d445b9a39a302490cb199, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732303330791 2024-11-22T19:22:12,906 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6c4ea8112fd4fedb6dfd74f3bf45567, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732303331499 2024-11-22T19:22:12,907 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13889243d1e746cfb9d3ef86c5b2c691, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1732303332194 2024-11-22T19:22:12,915 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/0edc6979b105441d9e797a6a6fe63d93 is 50, key is test_row_0/A:col10/1732303332555/Put/seqid=0 2024-11-22T19:22:12,946 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#C#compaction#75 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:12,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:12,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303392939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:12,947 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/51bc1592ab494107b924713cbfcca58a is 50, key is test_row_0/C:col10/1732303332517/Put/seqid=0 2024-11-22T19:22:12,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:12,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303392942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:12,955 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:12,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303392948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:12,961 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:12,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303392949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:12,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741913_1089 (size=17181) 2024-11-22T19:22:12,992 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/0edc6979b105441d9e797a6a6fe63d93 2024-11-22T19:22:13,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741914_1090 (size=13119) 2024-11-22T19:22:13,024 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/6524d685565344bb94d26f22beddcb9e is 50, key is test_row_0/B:col10/1732303332555/Put/seqid=0 2024-11-22T19:22:13,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:13,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303393049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:13,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:13,053 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:13,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303393051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:13,053 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T19:22:13,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:13,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:13,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:13,054 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
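Note on the repeated RegionTooBusyException warnings above: this is write backpressure; mutations are rejected while the region is over its 512.0 K memstore blocking limit, until flushes and compactions drain it. The stock HBase client already retries this exception on its own; the sketch below only makes an explicit backoff loop visible. It uses the table, row, and column names from this log, the retry parameters are arbitrary, and whether the exception reaches application code unwrapped depends on client retry settings.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BackoffPut {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
            long backoffMs = 100; // arbitrary starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // Region is over its memstore blocking limit; give flush/compaction time to drain.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}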
2024-11-22T19:22:13,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:13,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:13,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:13,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303393062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:13,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:13,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303393064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:13,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741915_1091 (size=12301) 2024-11-22T19:22:13,206 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:13,207 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T19:22:13,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:13,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:13,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:13,207 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:13,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:13,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:13,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:13,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303393262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:13,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:13,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303393264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:13,270 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:13,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303393267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:13,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:13,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303393271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:13,275 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/8190bf0cebea4b96aa8a0b640e403b20 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/8190bf0cebea4b96aa8a0b640e403b20 2024-11-22T19:22:13,302 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/B of 686ebaaf5a8e3b2d28eef9abb3c2302e into 8190bf0cebea4b96aa8a0b640e403b20(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:13,302 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:13,302 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/B, priority=13, startTime=1732303332785; duration=0sec 2024-11-22T19:22:13,302 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:13,302 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:B 2024-11-22T19:22:13,361 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:13,362 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T19:22:13,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:13,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:13,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:13,362 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:13,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:13,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:13,424 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/51bc1592ab494107b924713cbfcca58a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/51bc1592ab494107b924713cbfcca58a 2024-11-22T19:22:13,433 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/C of 686ebaaf5a8e3b2d28eef9abb3c2302e into 51bc1592ab494107b924713cbfcca58a(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:22:13,433 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:13,433 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/C, priority=13, startTime=1732303332785; duration=0sec 2024-11-22T19:22:13,433 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:13,433 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:C 2024-11-22T19:22:13,480 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/6524d685565344bb94d26f22beddcb9e 2024-11-22T19:22:13,514 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/4c6c42cff3cf4cba92f837dcb39eaa6b is 50, key is test_row_0/C:col10/1732303332555/Put/seqid=0 2024-11-22T19:22:13,515 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:13,517 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T19:22:13,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:13,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing
2024-11-22T19:22:13,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.
2024-11-22T19:22:13,517 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25
java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T19:22:13,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25
java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T19:22:13,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:13,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741916_1092 (size=12301) 2024-11-22T19:22:13,571 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/4c6c42cff3cf4cba92f837dcb39eaa6b 2024-11-22T19:22:13,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:13,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303393569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:13,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:13,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303393575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:13,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:13,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303393575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:13,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:13,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303393578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:13,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/0edc6979b105441d9e797a6a6fe63d93 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/0edc6979b105441d9e797a6a6fe63d93 2024-11-22T19:22:13,597 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/0edc6979b105441d9e797a6a6fe63d93, entries=250, sequenceid=356, filesize=16.8 K 2024-11-22T19:22:13,599 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/6524d685565344bb94d26f22beddcb9e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/6524d685565344bb94d26f22beddcb9e 2024-11-22T19:22:13,605 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/6524d685565344bb94d26f22beddcb9e, entries=150, sequenceid=356, filesize=12.0 K 2024-11-22T19:22:13,606 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/4c6c42cff3cf4cba92f837dcb39eaa6b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/4c6c42cff3cf4cba92f837dcb39eaa6b 2024-11-22T19:22:13,613 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/4c6c42cff3cf4cba92f837dcb39eaa6b, entries=150, sequenceid=356, filesize=12.0 K 2024-11-22T19:22:13,615 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 
KB/96180 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 716ms, sequenceid=356, compaction requested=false 2024-11-22T19:22:13,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:13,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:13,642 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-22T19:22:13,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:13,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:13,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:13,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:13,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:13,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:13,653 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/2cd527e559a34db78cec016e5635c5ee is 50, key is test_row_0/A:col10/1732303332930/Put/seqid=0 2024-11-22T19:22:13,672 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:13,672 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T19:22:13,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:13,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:13,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:13,673 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:13,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:13,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:13,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741917_1093 (size=14741) 2024-11-22T19:22:13,698 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/2cd527e559a34db78cec016e5635c5ee 2024-11-22T19:22:13,726 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/d22dbda0ff4745bfb4358f2299036f02 is 50, key is test_row_0/B:col10/1732303332930/Put/seqid=0 2024-11-22T19:22:13,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-22T19:22:13,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:13,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303393754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:13,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741918_1094 (size=12301) 2024-11-22T19:22:13,770 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/d22dbda0ff4745bfb4358f2299036f02 2024-11-22T19:22:13,806 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/d387318ce3304ffb9f7897c28f7c2c0b is 50, key is test_row_0/C:col10/1732303332930/Put/seqid=0 2024-11-22T19:22:13,825 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:13,833 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T19:22:13,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:13,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:13,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:13,833 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:13,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:13,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:13,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741919_1095 (size=12301) 2024-11-22T19:22:13,864 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:13,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303393860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:13,987 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:13,988 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T19:22:13,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:13,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:13,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:13,988 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:13,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:13,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:14,069 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:14,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303394066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:14,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:14,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303394074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:14,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:14,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303394083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:14,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:14,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303394085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:14,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:14,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303394090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:14,141 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:14,142 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T19:22:14,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:14,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:14,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:14,142 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
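The run of RegionTooBusyException warnings above is write backpressure: once the region's memstore passes its blocking limit (512.0 K here, presumably set small by the test; in a normal deployment the limit is hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier), each incoming Mutate is rejected until the in-flight flush frees space, and the writer is expected to back off and retry. A minimal client-side sketch of such a backoff, assuming the standard HBase Java client and that client retries are tuned low enough for the exception to actually reach the caller (by default the client retries it internally); the table, row, family and qualifier come from the log, while the class name, payload and retry/sleep values are purely illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long sleepMs = 100;
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);          // server may reject this while the region is over its memstore limit
              break;                   // write accepted
            } catch (RegionTooBusyException busy) {
              if (attempt >= 10) {
                throw busy;            // give up after a bounded number of attempts
              }
              Thread.sleep(sleepMs);   // back off so the flush can drain the memstore
              sleepMs = Math.min(sleepMs * 2, 5000);
            }
          }
        }
      }
    }

The exact numbers matter less than the exponential backoff itself: hammering a region that is already over its memstore limit only prolongs the window in which these exceptions are thrown.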
2024-11-22T19:22:14,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:14,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:14,251 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/d387318ce3304ffb9f7897c28f7c2c0b 2024-11-22T19:22:14,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/2cd527e559a34db78cec016e5635c5ee as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/2cd527e559a34db78cec016e5635c5ee 2024-11-22T19:22:14,266 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/2cd527e559a34db78cec016e5635c5ee, entries=200, sequenceid=377, filesize=14.4 K 2024-11-22T19:22:14,269 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/d22dbda0ff4745bfb4358f2299036f02 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d22dbda0ff4745bfb4358f2299036f02 2024-11-22T19:22:14,277 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d22dbda0ff4745bfb4358f2299036f02, entries=150, sequenceid=377, filesize=12.0 K 2024-11-22T19:22:14,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/d387318ce3304ffb9f7897c28f7c2c0b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/d387318ce3304ffb9f7897c28f7c2c0b 2024-11-22T19:22:14,289 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/d387318ce3304ffb9f7897c28f7c2c0b, entries=150, sequenceid=377, filesize=12.0 K 2024-11-22T19:22:14,290 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 648ms, sequenceid=377, compaction requested=true 2024-11-22T19:22:14,290 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:14,291 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction 
from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:14,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:14,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:14,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:14,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:14,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:22:14,292 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:14,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:14,294 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:14,295 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 45041 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:14,295 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T19:22:14,295 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/A is initiating minor compaction (all files) 2024-11-22T19:22:14,295 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/A in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:14,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:14,295 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/6caf296b355045d0953ce9744c8b46d4, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/0edc6979b105441d9e797a6a6fe63d93, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/2cd527e559a34db78cec016e5635c5ee] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=44.0 K 2024-11-22T19:22:14,295 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-22T19:22:14,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:14,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:14,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:14,296 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6caf296b355045d0953ce9744c8b46d4, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1732303332194 2024-11-22T19:22:14,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:14,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:14,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:14,296 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:14,296 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/B is initiating minor compaction (all files) 2024-11-22T19:22:14,296 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/B in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:14,296 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/8190bf0cebea4b96aa8a0b640e403b20, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/6524d685565344bb94d26f22beddcb9e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d22dbda0ff4745bfb4358f2299036f02] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=36.8 K 2024-11-22T19:22:14,297 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0edc6979b105441d9e797a6a6fe63d93, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732303332555 2024-11-22T19:22:14,297 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 8190bf0cebea4b96aa8a0b640e403b20, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1732303332194 2024-11-22T19:22:14,297 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2cd527e559a34db78cec016e5635c5ee, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732303332930 2024-11-22T19:22:14,297 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 6524d685565344bb94d26f22beddcb9e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732303332555 2024-11-22T19:22:14,299 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting d22dbda0ff4745bfb4358f2299036f02, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732303332930 2024-11-22T19:22:14,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/53e72d3d82544fb3aaf6ceb71895c1e2 is 50, key is test_row_0/A:col10/1732303333702/Put/seqid=0 2024-11-22T19:22:14,327 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#A#compaction#82 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:14,328 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/f6977b17a2034152b85cc0d27e944e73 is 50, key is test_row_0/A:col10/1732303332930/Put/seqid=0 2024-11-22T19:22:14,336 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#B#compaction#83 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:14,337 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/3fe07008ca61475bab7bfd96a142a420 is 50, key is test_row_0/B:col10/1732303332930/Put/seqid=0 2024-11-22T19:22:14,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741922_1098 (size=13221) 2024-11-22T19:22:14,378 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:14,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:14,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741921_1097 (size=13221) 2024-11-22T19:22:14,396 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/f6977b17a2034152b85cc0d27e944e73 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/f6977b17a2034152b85cc0d27e944e73 2024-11-22T19:22:14,404 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/A of 686ebaaf5a8e3b2d28eef9abb3c2302e into f6977b17a2034152b85cc0d27e944e73(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
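Both compaction threads above selected all three eligible HFiles for their store ("3 files ... after considering 1 permutations with 1 in ratio") and rewrote them into a single ~12.9 K file. The "in ratio" part refers to the size-ratio rule: a candidate set is acceptable only when no single file is disproportionately larger than the rest, so minor compactions keep merging similar-sized files instead of repeatedly rewriting one large one. Below is a simplified sketch of that test only, not the actual ExploringCompactionPolicy code (which also enforces min/max file counts, off-peak ratios and total-size limits); the 1.2 ratio is the usual default, the class name is illustrative, and the byte sizes merely approximate the 12.8 K / 16.8 K / 14.4 K A-store files named in the log:

    import java.util.Arrays;
    import java.util.List;

    // Simplified illustration of the size-ratio rule for a candidate set:
    // every file must be no larger than ratio * (sum of the other files in the set).
    public final class RatioCheck {
      public static boolean withinRatio(List<Long> fileSizes, double ratio) {
        long total = 0;
        for (long size : fileSizes) {
          total += size;
        }
        for (long size : fileSizes) {
          if (size > ratio * (total - size)) {
            return false;   // this file dwarfs the rest; leave it for a bigger compaction
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Roughly the three A-store files from the log (12.8 K, 16.8 K, 14.4 K).
        List<Long> sizes = Arrays.asList(13_108L, 17_203L, 14_746L);
        System.out.println(withinRatio(sizes, 1.2));   // true: all three are comparable in size
      }
    }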
2024-11-22T19:22:14,404 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:14,404 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/A, priority=13, startTime=1732303334290; duration=0sec 2024-11-22T19:22:14,404 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:14,404 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:A 2024-11-22T19:22:14,404 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:14,406 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:14,406 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/C is initiating minor compaction (all files) 2024-11-22T19:22:14,406 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/C in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:14,406 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/51bc1592ab494107b924713cbfcca58a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/4c6c42cff3cf4cba92f837dcb39eaa6b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/d387318ce3304ffb9f7897c28f7c2c0b] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=36.8 K 2024-11-22T19:22:14,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741920_1096 (size=12301) 2024-11-22T19:22:14,407 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51bc1592ab494107b924713cbfcca58a, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1732303332194 2024-11-22T19:22:14,408 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/53e72d3d82544fb3aaf6ceb71895c1e2 2024-11-22T19:22:14,408 DEBUG 
[RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c6c42cff3cf4cba92f837dcb39eaa6b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732303332555 2024-11-22T19:22:14,408 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d387318ce3304ffb9f7897c28f7c2c0b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732303332930 2024-11-22T19:22:14,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/fce5bc871bc6481c994ba7f345273e18 is 50, key is test_row_0/B:col10/1732303333702/Put/seqid=0 2024-11-22T19:22:14,436 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#C#compaction#85 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:14,437 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/4a018656809e4140b11d561505aee5b6 is 50, key is test_row_0/C:col10/1732303332930/Put/seqid=0 2024-11-22T19:22:14,473 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:14,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303394468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:14,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741923_1099 (size=12301) 2024-11-22T19:22:14,479 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/fce5bc871bc6481c994ba7f345273e18 2024-11-22T19:22:14,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741924_1100 (size=13221) 2024-11-22T19:22:14,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/5c635280bc1445e9876b5ba601d01e58 is 50, key is test_row_0/C:col10/1732303333702/Put/seqid=0 2024-11-22T19:22:14,499 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/4a018656809e4140b11d561505aee5b6 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/4a018656809e4140b11d561505aee5b6 2024-11-22T19:22:14,508 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/C of 686ebaaf5a8e3b2d28eef9abb3c2302e into 4a018656809e4140b11d561505aee5b6(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:14,508 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:14,508 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/C, priority=13, startTime=1732303334292; duration=0sec 2024-11-22T19:22:14,508 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:14,508 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:C 2024-11-22T19:22:14,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741925_1101 (size=12301) 2024-11-22T19:22:14,551 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/5c635280bc1445e9876b5ba601d01e58 2024-11-22T19:22:14,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/53e72d3d82544fb3aaf6ceb71895c1e2 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/53e72d3d82544fb3aaf6ceb71895c1e2 2024-11-22T19:22:14,572 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/53e72d3d82544fb3aaf6ceb71895c1e2, entries=150, sequenceid=395, filesize=12.0 K 2024-11-22T19:22:14,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/fce5bc871bc6481c994ba7f345273e18 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/fce5bc871bc6481c994ba7f345273e18 2024-11-22T19:22:14,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:14,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303394575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:14,581 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/fce5bc871bc6481c994ba7f345273e18, entries=150, sequenceid=395, filesize=12.0 K 2024-11-22T19:22:14,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/5c635280bc1445e9876b5ba601d01e58 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/5c635280bc1445e9876b5ba601d01e58 2024-11-22T19:22:14,588 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/5c635280bc1445e9876b5ba601d01e58, entries=150, sequenceid=395, filesize=12.0 K 2024-11-22T19:22:14,589 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 294ms, sequenceid=395, compaction requested=false 2024-11-22T19:22:14,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:14,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:14,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-22T19:22:14,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-22T19:22:14,593 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-22T19:22:14,593 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.9480 sec 2024-11-22T19:22:14,595 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 2.9550 sec 2024-11-22T19:22:14,777 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/3fe07008ca61475bab7bfd96a142a420 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/3fe07008ca61475bab7bfd96a142a420 2024-11-22T19:22:14,788 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-22T19:22:14,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:14,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:14,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:14,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:14,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:14,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:14,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:14,793 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/B of 686ebaaf5a8e3b2d28eef9abb3c2302e into 3fe07008ca61475bab7bfd96a142a420(size=12.9 K), total size for store is 24.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
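The success lines above close out the flush that produced the earlier "Unable to complete flush" error: the first dispatch of pid=25 arrived while MemStoreFlusher.0 was already writing the region, so it was rejected, and the master re-dispatched the callable until the second attempt flushed ~100.63 KB at sequenceid=395 and both pid=25 and its parent FlushTableProcedure (pid=24) finished. A flush of a whole table is normally requested through the client Admin API; a minimal sketch of issuing one, assuming the standard Admin.flush call (whether it runs as a master-side FlushTableProcedure, as it does on this build, depends on server version and configuration; the table name is from the log, the class name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Requests a flush of every region of the table; in this log the server handled
          // the equivalent request as FlushTableProcedure pid=24 with FlushRegionProcedure pid=25.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }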
2024-11-22T19:22:14,793 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:14,793 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/B, priority=13, startTime=1732303334291; duration=0sec 2024-11-22T19:22:14,793 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:14,793 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:B 2024-11-22T19:22:14,807 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/49df3da54eab4e1294b3f9ded35f3a9c is 50, key is test_row_0/A:col10/1732303334783/Put/seqid=0 2024-11-22T19:22:14,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741926_1102 (size=14741) 2024-11-22T19:22:14,850 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=417 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/49df3da54eab4e1294b3f9ded35f3a9c 2024-11-22T19:22:14,875 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/0cd0e511a6734eedb2e2544428b3885d is 50, key is test_row_0/B:col10/1732303334783/Put/seqid=0 2024-11-22T19:22:14,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:14,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303394875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:14,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741927_1103 (size=12301) 2024-11-22T19:22:14,917 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=417 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/0cd0e511a6734eedb2e2544428b3885d 2024-11-22T19:22:14,933 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/3b104b457f4443f7a98a980ec0d343da is 50, key is test_row_0/C:col10/1732303334783/Put/seqid=0 2024-11-22T19:22:14,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741928_1104 (size=12301) 2024-11-22T19:22:14,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:14,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303394982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:15,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:15,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303395083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:15,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:15,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303395094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:15,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:15,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303395100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:15,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:15,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303395104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:15,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:15,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303395188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:15,373 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=417 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/3b104b457f4443f7a98a980ec0d343da 2024-11-22T19:22:15,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/49df3da54eab4e1294b3f9ded35f3a9c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/49df3da54eab4e1294b3f9ded35f3a9c 2024-11-22T19:22:15,410 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/49df3da54eab4e1294b3f9ded35f3a9c, entries=200, sequenceid=417, filesize=14.4 K 2024-11-22T19:22:15,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/0cd0e511a6734eedb2e2544428b3885d as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/0cd0e511a6734eedb2e2544428b3885d 2024-11-22T19:22:15,423 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/0cd0e511a6734eedb2e2544428b3885d, entries=150, sequenceid=417, filesize=12.0 K 2024-11-22T19:22:15,424 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/3b104b457f4443f7a98a980ec0d343da as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/3b104b457f4443f7a98a980ec0d343da 2024-11-22T19:22:15,440 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/3b104b457f4443f7a98a980ec0d343da, entries=150, sequenceid=417, filesize=12.0 K 2024-11-22T19:22:15,441 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 653ms, sequenceid=417, compaction requested=true 2024-11-22T19:22:15,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:15,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:15,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:15,442 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:15,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:15,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:15,442 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:15,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:22:15,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:15,443 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40263 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:15,444 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/A is initiating minor compaction (all files) 2024-11-22T19:22:15,444 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/A in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
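The "Over memstore limit=512.0 K" rejections above, together with per-store flushes of only a few tens of kilobytes, indicate this test runs with a deliberately tiny memstore flush size so that flushing and write-blocking kick in quickly. Below is a minimal sketch of how such limits are usually dialed down for a test, assuming a 128 KB flush size and the default blocking multiplier of 4 (128 KB x 4 = 512 KB, the limit reported in the log); the exact values this run used are an assumption, not read from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TinyMemstoreConfig {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 128 KB (assumed test value; the production default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Reject new updates with RegionTooBusyException once the memstore reaches
    // flush.size * multiplier, i.e. 512 KB here.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}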
2024-11-22T19:22:15,444 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/f6977b17a2034152b85cc0d27e944e73, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/53e72d3d82544fb3aaf6ceb71895c1e2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/49df3da54eab4e1294b3f9ded35f3a9c] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=39.3 K 2024-11-22T19:22:15,444 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:15,444 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/B is initiating minor compaction (all files) 2024-11-22T19:22:15,444 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/B in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:15,445 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/3fe07008ca61475bab7bfd96a142a420, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/fce5bc871bc6481c994ba7f345273e18, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/0cd0e511a6734eedb2e2544428b3885d] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=36.9 K 2024-11-22T19:22:15,445 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f6977b17a2034152b85cc0d27e944e73, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732303332930 2024-11-22T19:22:15,446 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 53e72d3d82544fb3aaf6ceb71895c1e2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732303333702 2024-11-22T19:22:15,446 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 3fe07008ca61475bab7bfd96a142a420, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732303332930 2024-11-22T19:22:15,446 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting fce5bc871bc6481c994ba7f345273e18, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732303333702 2024-11-22T19:22:15,446 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 49df3da54eab4e1294b3f9ded35f3a9c, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=417, earliestPutTs=1732303334418 2024-11-22T19:22:15,447 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 0cd0e511a6734eedb2e2544428b3885d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=417, earliestPutTs=1732303334460 2024-11-22T19:22:15,460 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#A#compaction#90 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:15,461 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/6fe5a469359445c08ce28e3574f4783f is 50, key is test_row_0/A:col10/1732303334783/Put/seqid=0 2024-11-22T19:22:15,474 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#B#compaction#91 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:15,475 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/b2a84e4a65054fdb83b4ac20975cceb5 is 50, key is test_row_0/B:col10/1732303334783/Put/seqid=0 2024-11-22T19:22:15,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741929_1105 (size=13323) 2024-11-22T19:22:15,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:15,496 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-22T19:22:15,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:15,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:15,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:15,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:15,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:15,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:15,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741930_1106 (size=13323) 2024-11-22T19:22:15,506 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/edcb543d63d14d59aacb3ad8bb75536a is 50, key is test_row_0/A:col10/1732303334874/Put/seqid=0 2024-11-22T19:22:15,510 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/6fe5a469359445c08ce28e3574f4783f as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/6fe5a469359445c08ce28e3574f4783f 2024-11-22T19:22:15,517 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/b2a84e4a65054fdb83b4ac20975cceb5 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/b2a84e4a65054fdb83b4ac20975cceb5 2024-11-22T19:22:15,537 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/A of 686ebaaf5a8e3b2d28eef9abb3c2302e into 6fe5a469359445c08ce28e3574f4783f(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:22:15,537 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:15,537 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/A, priority=13, startTime=1732303335442; duration=0sec 2024-11-22T19:22:15,537 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:15,537 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:A 2024-11-22T19:22:15,537 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:15,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741931_1107 (size=14741) 2024-11-22T19:22:15,540 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/B of 686ebaaf5a8e3b2d28eef9abb3c2302e into b2a84e4a65054fdb83b4ac20975cceb5(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
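The selection lines above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking") show the minor-compaction policy firing as soon as a store accumulates three files, with writes blocked only at sixteen. A sketch of the region-server settings behind those numbers, assuming stock ExploringCompactionPolicy-style values rather than anything read from this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionConfig {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Consider a minor compaction once a store has at least 3 eligible files.
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Never pull more than 10 files into a single minor compaction.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Block flushes for a store once it accumulates 16 store files
    // (the "16 blocking" figure in the selection log above).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    return conf;
  }
}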
2024-11-22T19:22:15,540 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:15,540 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/B, priority=13, startTime=1732303335442; duration=0sec 2024-11-22T19:22:15,540 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:15,540 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:B 2024-11-22T19:22:15,541 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:15,541 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/C is initiating minor compaction (all files) 2024-11-22T19:22:15,541 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/C in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:15,541 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/4a018656809e4140b11d561505aee5b6, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/5c635280bc1445e9876b5ba601d01e58, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/3b104b457f4443f7a98a980ec0d343da] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=36.9 K 2024-11-22T19:22:15,542 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a018656809e4140b11d561505aee5b6, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732303332930 2024-11-22T19:22:15,543 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5c635280bc1445e9876b5ba601d01e58, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732303333702 2024-11-22T19:22:15,543 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b104b457f4443f7a98a980ec0d343da, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=417, earliestPutTs=1732303334460 2024-11-22T19:22:15,545 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=435 (bloomFilter=true), 
to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/edcb543d63d14d59aacb3ad8bb75536a 2024-11-22T19:22:15,575 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/21874b59375449358dfa5412ea80c4fe is 50, key is test_row_0/B:col10/1732303334874/Put/seqid=0 2024-11-22T19:22:15,588 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#C#compaction#94 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:15,589 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/e4338388529441d384f70064e8af658a is 50, key is test_row_0/C:col10/1732303334783/Put/seqid=0 2024-11-22T19:22:15,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:15,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303395587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:15,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741932_1108 (size=12301) 2024-11-22T19:22:15,626 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/21874b59375449358dfa5412ea80c4fe 2024-11-22T19:22:15,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741933_1109 (size=13323) 2024-11-22T19:22:15,653 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/e4338388529441d384f70064e8af658a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/e4338388529441d384f70064e8af658a 2024-11-22T19:22:15,654 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/e21c6e1f7fec42fbb764f269e02f4db9 is 50, key is test_row_0/C:col10/1732303334874/Put/seqid=0 2024-11-22T19:22:15,661 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/C of 686ebaaf5a8e3b2d28eef9abb3c2302e into e4338388529441d384f70064e8af658a(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
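The repeated RegionTooBusyException rejections of Mutate calls in this window are transient back-pressure: the server refuses each put until the in-flight flush drains the memstore below the 512 K limit. A minimal client-side sketch of retrying such a put with backoff, assuming the table and column layout visible in this test (TestAcidGuarantees, families A/B/C, qualifier col10); the retry count and sleep are illustrative only, and the stock HBase client already performs similar retries internally.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;                      // accepted once the memstore has drained
        } catch (RegionTooBusyException busy) {
          if (attempt >= 5) {
            throw busy;               // give up after a few attempts
          }
          Thread.sleep(backoffMs);    // simple exponential backoff
          backoffMs *= 2;
        }
      }
    }
  }
}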
2024-11-22T19:22:15,661 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:15,661 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/C, priority=13, startTime=1732303335442; duration=0sec 2024-11-22T19:22:15,661 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:15,661 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:C 2024-11-22T19:22:15,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741934_1110 (size=12301) 2024-11-22T19:22:15,691 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/e21c6e1f7fec42fbb764f269e02f4db9 2024-11-22T19:22:15,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:15,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303395693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:15,702 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/edcb543d63d14d59aacb3ad8bb75536a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/edcb543d63d14d59aacb3ad8bb75536a 2024-11-22T19:22:15,721 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/edcb543d63d14d59aacb3ad8bb75536a, entries=200, sequenceid=435, filesize=14.4 K 2024-11-22T19:22:15,723 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/21874b59375449358dfa5412ea80c4fe as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/21874b59375449358dfa5412ea80c4fe 2024-11-22T19:22:15,731 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/21874b59375449358dfa5412ea80c4fe, entries=150, sequenceid=435, filesize=12.0 K 2024-11-22T19:22:15,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/e21c6e1f7fec42fbb764f269e02f4db9 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/e21c6e1f7fec42fbb764f269e02f4db9 2024-11-22T19:22:15,745 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/e21c6e1f7fec42fbb764f269e02f4db9, entries=150, sequenceid=435, filesize=12.0 K 2024-11-22T19:22:15,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 
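The master-side polling above ("Checking to see if procedure is done pid=24") is the tail end of a client-requested table flush; the entries that follow show that FLUSH operation completing and a new FlushTableProcedure being queued as pid=26. A sketch of the client call that drives this, assuming the test simply flushes the whole table by name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; the master runs a
      // FlushTableProcedure and the client blocks until it is reported done.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

Note that, as the subsequent log entries show, the region server may answer a remote flush request with "NOT flushing ... as already flushing", in which case the master retries the sub-procedure until a flush can actually run.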
2024-11-22T19:22:15,749 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-22T19:22:15,751 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:22:15,752 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 255ms, sequenceid=435, compaction requested=false 2024-11-22T19:22:15,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:15,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-22T19:22:15,753 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:22:15,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-22T19:22:15,754 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:22:15,754 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:22:15,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-22T19:22:15,899 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-22T19:22:15,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:15,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:15,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:15,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:15,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:15,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:15,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:15,908 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:15,908 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T19:22:15,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:15,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:15,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:15,909 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:15,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:15,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:15,918 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/cd022d0976474bd6b12399462dfd110a is 50, key is test_row_0/A:col10/1732303335585/Put/seqid=0 2024-11-22T19:22:15,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741935_1111 (size=14741) 2024-11-22T19:22:15,948 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=457 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/cd022d0976474bd6b12399462dfd110a 2024-11-22T19:22:15,970 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/9b512660de5349979605e08f486a4a11 is 50, key is test_row_0/B:col10/1732303335585/Put/seqid=0 2024-11-22T19:22:16,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741936_1112 (size=12301) 2024-11-22T19:22:16,002 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:16,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303396000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:16,003 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=457 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/9b512660de5349979605e08f486a4a11 2024-11-22T19:22:16,028 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/bbf6ea1f0c70494596b34316a26f662a is 50, key is test_row_0/C:col10/1732303335585/Put/seqid=0 2024-11-22T19:22:16,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-22T19:22:16,063 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:16,064 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T19:22:16,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:16,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:16,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:16,065 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:16,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:16,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:16,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741937_1113 (size=12301) 2024-11-22T19:22:16,112 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:16,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303396108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:16,218 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:16,219 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T19:22:16,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:16,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:16,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:16,219 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:16,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:16,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:16,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:16,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303396315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:16,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-22T19:22:16,378 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:16,379 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T19:22:16,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:16,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:16,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
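The sustained pressure visible throughout this window, with puts from 172.17.0.2 being rejected while MemStoreFlusher and the compaction threads churn through families A, B and C, is the usual TestAcidGuarantees pattern of several writer threads hammering the same rows across all families at once. A rough sketch of such a writer, assuming rows named test_row_N and a single qualifier per family as seen in the HFile keys above; the thread and row counts are illustrative, not taken from the test.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AcidWritersSketch {
  private static final byte[][] FAMILIES = {
      Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C") };

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    ExecutorService pool = Executors.newFixedThreadPool(5);   // illustrative writer count
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      for (int t = 0; t < 5; t++) {
        pool.submit(() -> {
          try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            for (int i = 0; i < 1000; i++) {
              // Write the same value to every family of one row so readers can
              // check that the families stay mutually consistent.
              Put put = new Put(Bytes.toBytes("test_row_" + (i % 10)));
              byte[] value = Bytes.toBytes("val_" + i);
              for (byte[] family : FAMILIES) {
                put.addColumn(family, Bytes.toBytes("col10"), value);
              }
              table.put(put);
            }
          } catch (Exception e) {
            e.printStackTrace();
          }
          return null;
        });
      }
      pool.shutdown();
      pool.awaitTermination(5, TimeUnit.MINUTES);  // keep the connection open until writers finish
    }
  }
}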
2024-11-22T19:22:16,380 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:16,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:16,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:16,469 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=457 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/bbf6ea1f0c70494596b34316a26f662a 2024-11-22T19:22:16,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/cd022d0976474bd6b12399462dfd110a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/cd022d0976474bd6b12399462dfd110a 2024-11-22T19:22:16,492 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/cd022d0976474bd6b12399462dfd110a, entries=200, sequenceid=457, filesize=14.4 K 2024-11-22T19:22:16,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/9b512660de5349979605e08f486a4a11 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/9b512660de5349979605e08f486a4a11 2024-11-22T19:22:16,502 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/9b512660de5349979605e08f486a4a11, entries=150, 
sequenceid=457, filesize=12.0 K 2024-11-22T19:22:16,504 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/bbf6ea1f0c70494596b34316a26f662a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/bbf6ea1f0c70494596b34316a26f662a 2024-11-22T19:22:16,511 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/bbf6ea1f0c70494596b34316a26f662a, entries=150, sequenceid=457, filesize=12.0 K 2024-11-22T19:22:16,512 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 613ms, sequenceid=457, compaction requested=true 2024-11-22T19:22:16,513 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:16,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:16,514 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:16,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:16,515 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:16,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:16,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:16,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:22:16,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:16,517 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42805 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:16,517 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/A is initiating minor compaction (all files) 2024-11-22T19:22:16,518 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/A in 
TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:16,518 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/6fe5a469359445c08ce28e3574f4783f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/edcb543d63d14d59aacb3ad8bb75536a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/cd022d0976474bd6b12399462dfd110a] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=41.8 K 2024-11-22T19:22:16,518 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:16,518 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/B is initiating minor compaction (all files) 2024-11-22T19:22:16,518 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/B in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:16,518 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/b2a84e4a65054fdb83b4ac20975cceb5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/21874b59375449358dfa5412ea80c4fe, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/9b512660de5349979605e08f486a4a11] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=37.0 K 2024-11-22T19:22:16,519 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting b2a84e4a65054fdb83b4ac20975cceb5, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=417, earliestPutTs=1732303334460 2024-11-22T19:22:16,519 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6fe5a469359445c08ce28e3574f4783f, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=417, earliestPutTs=1732303334460 2024-11-22T19:22:16,520 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 21874b59375449358dfa5412ea80c4fe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732303334846 2024-11-22T19:22:16,521 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting edcb543d63d14d59aacb3ad8bb75536a, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732303334846 2024-11-22T19:22:16,522 
DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b512660de5349979605e08f486a4a11, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1732303335575 2024-11-22T19:22:16,522 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd022d0976474bd6b12399462dfd110a, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1732303335575 2024-11-22T19:22:16,540 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:16,541 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T19:22:16,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:16,541 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-22T19:22:16,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:16,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:16,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:16,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:16,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:16,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:16,554 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#B#compaction#99 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:16,555 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/702fe8e554374f86be8cc967be75b21d is 50, key is test_row_0/B:col10/1732303335585/Put/seqid=0 2024-11-22T19:22:16,569 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#A#compaction#100 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:16,569 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/d2dfa40889da4625aed515ebcadccd22 is 50, key is test_row_0/A:col10/1732303335585/Put/seqid=0 2024-11-22T19:22:16,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/79ab4f6311fd48e6b9e9c8bd2c2ac9a6 is 50, key is test_row_0/A:col10/1732303335977/Put/seqid=0 2024-11-22T19:22:16,621 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:16,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:16,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741938_1114 (size=13425) 2024-11-22T19:22:16,635 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/702fe8e554374f86be8cc967be75b21d as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/702fe8e554374f86be8cc967be75b21d 2024-11-22T19:22:16,645 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/B of 686ebaaf5a8e3b2d28eef9abb3c2302e into 702fe8e554374f86be8cc967be75b21d(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:16,645 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:16,646 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/B, priority=13, startTime=1732303336515; duration=0sec 2024-11-22T19:22:16,646 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:16,646 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:B 2024-11-22T19:22:16,646 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:16,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741939_1115 (size=13425) 2024-11-22T19:22:16,651 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:16,651 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/C is initiating minor compaction (all files) 2024-11-22T19:22:16,651 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/C in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:16,651 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/e4338388529441d384f70064e8af658a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/e21c6e1f7fec42fbb764f269e02f4db9, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/bbf6ea1f0c70494596b34316a26f662a] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=37.0 K 2024-11-22T19:22:16,655 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting e4338388529441d384f70064e8af658a, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=417, earliestPutTs=1732303334460 2024-11-22T19:22:16,655 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting e21c6e1f7fec42fbb764f269e02f4db9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732303334846 2024-11-22T19:22:16,656 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting bbf6ea1f0c70494596b34316a26f662a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1732303335575 2024-11-22T19:22:16,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741940_1116 (size=12301) 2024-11-22T19:22:16,694 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#C#compaction#102 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:16,695 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/259b99e68e364d5195388bc8453856fa is 50, key is test_row_0/C:col10/1732303335585/Put/seqid=0 2024-11-22T19:22:16,726 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:16,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303396723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:16,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741941_1117 (size=13425) 2024-11-22T19:22:16,831 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:16,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303396829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:16,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-22T19:22:17,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:17,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303397034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:17,057 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/d2dfa40889da4625aed515ebcadccd22 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/d2dfa40889da4625aed515ebcadccd22 2024-11-22T19:22:17,064 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/A of 686ebaaf5a8e3b2d28eef9abb3c2302e into d2dfa40889da4625aed515ebcadccd22(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:17,064 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:17,064 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/A, priority=13, startTime=1732303336514; duration=0sec 2024-11-22T19:22:17,065 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:17,065 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:A 2024-11-22T19:22:17,082 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/79ab4f6311fd48e6b9e9c8bd2c2ac9a6 2024-11-22T19:22:17,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:17,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47228 deadline: 1732303397097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:17,101 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4158 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., hostname=a307a1377457,35917,1732303314657, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:22:17,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/cef5713c7e1e4188892aa0bf74980bd4 is 50, key is test_row_0/B:col10/1732303335977/Put/seqid=0 2024-11-22T19:22:17,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:17,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47238 deadline: 1732303397104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:17,108 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4168 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., hostname=a307a1377457,35917,1732303314657, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:22:17,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:17,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47256 deadline: 1732303397115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:17,120 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4173 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., hostname=a307a1377457,35917,1732303314657, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:22:17,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:17,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47248 deadline: 1732303397124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:17,126 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4177 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at 
region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., hostname=a307a1377457,35917,1732303314657, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:22:17,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741942_1118 (size=12301) 2024-11-22T19:22:17,158 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/259b99e68e364d5195388bc8453856fa as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/259b99e68e364d5195388bc8453856fa 2024-11-22T19:22:17,158 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/cef5713c7e1e4188892aa0bf74980bd4 2024-11-22T19:22:17,165 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/C of 686ebaaf5a8e3b2d28eef9abb3c2302e into 259b99e68e364d5195388bc8453856fa(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
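The RegionTooBusyException entries above all come from HRegion.checkResources rejecting puts while this region's memstore is over its blocking limit (512.0 K here), and the client-side RpcRetryingCallerImpl entries show those puts being retried (tries=6 of retries=16). The blocking limit is the per-region flush size multiplied by the block multiplier. The snippet below is only an illustrative sketch of those two settings; the 128 KB flush size is an assumed value chosen so that the product matches the 512 K limit seen in this log, not configuration taken from the test itself.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingSketch {
        public static void main(String[] args) {
            // Standard HBase configuration (hbase-site.xml plus defaults).
            Configuration conf = HBaseConfiguration.create();

            // Per-region memstore flush threshold (illustrative 128 KB; the default is 128 MB).
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);

            // Puts are rejected with RegionTooBusyException once the memstore reaches
            // flush.size * block.multiplier, i.e. 128 KB * 4 = 512 KB here, matching the
            // "Over memstore limit=512.0 K" messages above.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

            long limit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("blocking limit = " + limit + " bytes");
        }
    }

Once the flushes and compactions logged below catch up, the memstore drops back under that limit and the retried puts go through.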
2024-11-22T19:22:17,165 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:17,165 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/C, priority=13, startTime=1732303336516; duration=0sec 2024-11-22T19:22:17,166 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:17,166 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:C 2024-11-22T19:22:17,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/b68c16528b3749bf99b5d213902f27dc is 50, key is test_row_0/C:col10/1732303335977/Put/seqid=0 2024-11-22T19:22:17,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741943_1119 (size=12301) 2024-11-22T19:22:17,197 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/b68c16528b3749bf99b5d213902f27dc 2024-11-22T19:22:17,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/79ab4f6311fd48e6b9e9c8bd2c2ac9a6 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/79ab4f6311fd48e6b9e9c8bd2c2ac9a6 2024-11-22T19:22:17,218 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/79ab4f6311fd48e6b9e9c8bd2c2ac9a6, entries=150, sequenceid=474, filesize=12.0 K 2024-11-22T19:22:17,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/cef5713c7e1e4188892aa0bf74980bd4 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/cef5713c7e1e4188892aa0bf74980bd4 2024-11-22T19:22:17,227 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/cef5713c7e1e4188892aa0bf74980bd4, entries=150, sequenceid=474, filesize=12.0 K 2024-11-22T19:22:17,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/b68c16528b3749bf99b5d213902f27dc as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/b68c16528b3749bf99b5d213902f27dc 2024-11-22T19:22:17,235 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/b68c16528b3749bf99b5d213902f27dc, entries=150, sequenceid=474, filesize=12.0 K 2024-11-22T19:22:17,237 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 696ms, sequenceid=474, compaction requested=false 2024-11-22T19:22:17,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:17,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
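The flush that finishes here wrote one ~12.0 K HFile per column family (A, B and C, entries=150 each at sequenceid=474) and took 696 ms for ~93.93 KB of data. For orientation, a table shaped like the TestAcidGuarantees table these stores belong to, with exactly those three families, could be declared through the public admin API as in the sketch below; the connection setup and the call site are assumptions for illustration, not code from the test tool.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTestTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection();
                 Admin admin = conn.getAdmin()) {
                // One column family per store seen in the flush/compaction entries: A, B, C.
                TableDescriptor desc = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
                    .build();
                admin.createTable(desc);
            }
        }
    }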
2024-11-22T19:22:17,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-11-22T19:22:17,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-11-22T19:22:17,241 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-22T19:22:17,241 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4840 sec 2024-11-22T19:22:17,243 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.4900 sec 2024-11-22T19:22:17,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:17,342 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-22T19:22:17,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:17,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:17,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:17,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:17,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:17,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:17,350 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/e730795fedc14152b4641cdbb564d47e is 50, key is test_row_0/A:col10/1732303336720/Put/seqid=0 2024-11-22T19:22:17,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741944_1120 (size=14741) 2024-11-22T19:22:17,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:17,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303397425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:17,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:17,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303397533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:17,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:17,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 281 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303397741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:17,789 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=497 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/e730795fedc14152b4641cdbb564d47e 2024-11-22T19:22:17,806 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/3a91cad7ffed42e4a3e5387fa5a03847 is 50, key is test_row_0/B:col10/1732303336720/Put/seqid=0 2024-11-22T19:22:17,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-22T19:22:17,859 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-22T19:22:17,861 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:22:17,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-22T19:22:17,863 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:22:17,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-22T19:22:17,864 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 
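The pid=26 and pid=28 entries show how a client flush request ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees") becomes a FlushTableProcedure on the master, which then fans out one FlushRegionProcedure per region. On the client side that request corresponds to a plain admin flush call, sketched below against the public API; this is an illustration of the call the log implies, not the test's actual code.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection();
                 Admin admin = conn.getAdmin()) {
                // Asks the master to flush every region of the table; the master drives a
                // FlushTableProcedure (pid=26, pid=28 above) and the client waits on the procId.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }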
2024-11-22T19:22:17,864 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:22:17,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741945_1121 (size=12301) 2024-11-22T19:22:17,870 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=497 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/3a91cad7ffed42e4a3e5387fa5a03847 2024-11-22T19:22:17,892 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/7a9a4a153ae34f319a8f24f09c120fff is 50, key is test_row_0/C:col10/1732303336720/Put/seqid=0 2024-11-22T19:22:17,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741946_1122 (size=12301) 2024-11-22T19:22:17,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-22T19:22:18,015 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:18,016 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-22T19:22:18,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:18,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:18,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:18,016 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:18,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:18,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:18,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:18,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 283 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303398044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:18,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-22T19:22:18,169 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:18,171 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-22T19:22:18,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:18,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:18,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:18,171 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:18,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
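While pid=29 keeps failing with "Unable to complete flush ... as already flushing" and being re-dispatched, the writer threads from the earlier client traces (AcidGuaranteesTestTool$AtomicityWriter.doAnAction calling HTable.put on rows like 'test_row_0') keep refilling the memstore. The sketch below shows the general shape of such a put against the public Table API, writing the same value to all three families; the row, qualifier, value and error handling are assumptions for illustration, since the writer's real code is not part of this log.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AtomicWriteSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection();
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                // Same value in every family so a concurrent reader can check row atomicity.
                for (String family : new String[] {"A", "B", "C"}) {
                    put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"),
                        Bytes.toBytes("value"));
                }
                try {
                    // Retried internally (RpcRetryingCallerImpl) while the region is too busy.
                    table.put(put);
                } catch (IOException e) {
                    // Surfaces only after the retry budget is spent, wrapping the
                    // RegionTooBusyException seen in the client traces above.
                    System.err.println("put failed: " + e.getMessage());
                }
            }
        }
    }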
2024-11-22T19:22:18,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:18,325 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:18,326 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-22T19:22:18,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:18,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
as already flushing 2024-11-22T19:22:18,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:18,326 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:18,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:18,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:18,345 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=497 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/7a9a4a153ae34f319a8f24f09c120fff 2024-11-22T19:22:18,354 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/e730795fedc14152b4641cdbb564d47e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/e730795fedc14152b4641cdbb564d47e 2024-11-22T19:22:18,364 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/e730795fedc14152b4641cdbb564d47e, entries=200, sequenceid=497, filesize=14.4 K 2024-11-22T19:22:18,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/3a91cad7ffed42e4a3e5387fa5a03847 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/3a91cad7ffed42e4a3e5387fa5a03847 2024-11-22T19:22:18,373 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/3a91cad7ffed42e4a3e5387fa5a03847, entries=150, sequenceid=497, filesize=12.0 K 2024-11-22T19:22:18,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/7a9a4a153ae34f319a8f24f09c120fff as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/7a9a4a153ae34f319a8f24f09c120fff 2024-11-22T19:22:18,380 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/7a9a4a153ae34f319a8f24f09c120fff, entries=150, sequenceid=497, filesize=12.0 K 2024-11-22T19:22:18,383 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 1041ms, sequenceid=497, compaction requested=true 2024-11-22T19:22:18,383 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:18,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:18,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:18,384 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:18,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:18,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:18,384 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:18,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:22:18,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:18,386 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40467 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:18,386 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/A is initiating minor 
compaction (all files) 2024-11-22T19:22:18,386 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/A in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:18,386 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/d2dfa40889da4625aed515ebcadccd22, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/79ab4f6311fd48e6b9e9c8bd2c2ac9a6, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/e730795fedc14152b4641cdbb564d47e] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=39.5 K 2024-11-22T19:22:18,387 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:18,387 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/B is initiating minor compaction (all files) 2024-11-22T19:22:18,387 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/B in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
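[editor's note] The entries above show ExploringCompactionPolicy selecting three eligible store files for a minor compaction as soon as the flush adds a third HFile per store ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking"). The following is a minimal sketch of the configuration knobs behind that selection; the key names are the stock HBase 2.x ones, and the values are illustrative rather than read from this test run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
    public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is considered
        // (the log shows selection kicking in at exactly 3 files per store).
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Upper bound on how many files a single compaction may rewrite.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Size ratio ExploringCompactionPolicy uses to decide whether a file is close
        // enough in size to its neighbours to be included in the candidate set.
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2F);
        // Store file count at which further flushes are blocked ("16 blocking" above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        return conf;
    }
}
```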
2024-11-22T19:22:18,387 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/702fe8e554374f86be8cc967be75b21d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/cef5713c7e1e4188892aa0bf74980bd4, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/3a91cad7ffed42e4a3e5387fa5a03847] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=37.1 K 2024-11-22T19:22:18,388 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d2dfa40889da4625aed515ebcadccd22, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1732303335575 2024-11-22T19:22:18,388 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 702fe8e554374f86be8cc967be75b21d, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1732303335575 2024-11-22T19:22:18,389 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79ab4f6311fd48e6b9e9c8bd2c2ac9a6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732303335977 2024-11-22T19:22:18,389 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting cef5713c7e1e4188892aa0bf74980bd4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732303335977 2024-11-22T19:22:18,390 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a91cad7ffed42e4a3e5387fa5a03847, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=497, earliestPutTs=1732303336717 2024-11-22T19:22:18,390 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e730795fedc14152b4641cdbb564d47e, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=497, earliestPutTs=1732303336688 2024-11-22T19:22:18,414 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#A#compaction#108 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:18,415 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/bcdd9662c8664593bb7485a25c6b46cb is 50, key is test_row_0/A:col10/1732303336720/Put/seqid=0 2024-11-22T19:22:18,421 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#B#compaction#109 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:18,422 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/079998da92c343678c98eb98523bbb97 is 50, key is test_row_0/B:col10/1732303336720/Put/seqid=0 2024-11-22T19:22:18,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741948_1124 (size=13527) 2024-11-22T19:22:18,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741947_1123 (size=13527) 2024-11-22T19:22:18,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-22T19:22:18,479 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:18,480 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-22T19:22:18,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:18,481 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-22T19:22:18,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:18,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:18,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:18,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:18,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:18,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:18,499 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/079998da92c343678c98eb98523bbb97 as 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/079998da92c343678c98eb98523bbb97 2024-11-22T19:22:18,501 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/bcdd9662c8664593bb7485a25c6b46cb as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/bcdd9662c8664593bb7485a25c6b46cb 2024-11-22T19:22:18,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/5508c4ea8e404fceb295feddf4a62ae5 is 50, key is test_row_0/A:col10/1732303337370/Put/seqid=0 2024-11-22T19:22:18,513 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/B of 686ebaaf5a8e3b2d28eef9abb3c2302e into 079998da92c343678c98eb98523bbb97(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:22:18,513 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:18,514 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/B, priority=13, startTime=1732303338384; duration=0sec 2024-11-22T19:22:18,514 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:18,514 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:B 2024-11-22T19:22:18,514 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:18,516 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:18,517 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/A of 686ebaaf5a8e3b2d28eef9abb3c2302e into bcdd9662c8664593bb7485a25c6b46cb(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
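[editor's note] The compactions in this run are system-requested after each flush, but the same work can be asked for explicitly through the Admin API. A hedged sketch using the standard HBase 2.x client follows; the table name is taken from this log, while the connection setup and polling interval are illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompaction {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Ask for a major compaction of every region of the table; the request is
            // queued on the region servers much like the system-triggered ones above.
            admin.majorCompact(table);
            // Poll until the region servers report no compaction in progress.
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(1000);
            }
        }
    }
}
```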
2024-11-22T19:22:18,517 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/C is initiating minor compaction (all files) 2024-11-22T19:22:18,517 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:18,517 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/A, priority=13, startTime=1732303338383; duration=0sec 2024-11-22T19:22:18,517 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/C in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:18,517 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:18,517 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:A 2024-11-22T19:22:18,517 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/259b99e68e364d5195388bc8453856fa, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/b68c16528b3749bf99b5d213902f27dc, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/7a9a4a153ae34f319a8f24f09c120fff] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=37.1 K 2024-11-22T19:22:18,518 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 259b99e68e364d5195388bc8453856fa, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1732303335575 2024-11-22T19:22:18,518 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting b68c16528b3749bf99b5d213902f27dc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732303335977 2024-11-22T19:22:18,519 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 7a9a4a153ae34f319a8f24f09c120fff, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=497, earliestPutTs=1732303336717 2024-11-22T19:22:18,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741949_1125 (size=12301) 2024-11-22T19:22:18,546 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#C#compaction#111 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:18,547 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/d17a79a8ba9c45aca44c37e9d083c38e is 50, key is test_row_0/C:col10/1732303336720/Put/seqid=0 2024-11-22T19:22:18,548 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=513 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/5508c4ea8e404fceb295feddf4a62ae5 2024-11-22T19:22:18,559 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:18,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:18,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741950_1126 (size=13527) 2024-11-22T19:22:18,584 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/d17a79a8ba9c45aca44c37e9d083c38e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/d17a79a8ba9c45aca44c37e9d083c38e 2024-11-22T19:22:18,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/56a8874616854397888b87d56932b65e is 50, key is test_row_0/B:col10/1732303337370/Put/seqid=0 2024-11-22T19:22:18,595 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/C of 686ebaaf5a8e3b2d28eef9abb3c2302e into d17a79a8ba9c45aca44c37e9d083c38e(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
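[editor's note] The "total limit is 50.00 MB/second" lines come from the PressureAwareThroughputController pacing compaction I/O. Below is a minimal sketch of the related settings, assuming the stock HBase 2.x key names; the byte values mirror the common 50 MB/s and 100 MB/s defaults and are not taken from this test's configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughput {
    public static Configuration throttledConf() {
        Configuration conf = HBaseConfiguration.create();
        // Lower bound of the pressure-aware limit: with little memstore pressure the
        // controller stays near this value, matching the 50 MB/s seen in the log.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        // Upper bound the controller ramps toward as flush/memstore pressure rises.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        return conf;
    }
}
```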
2024-11-22T19:22:18,595 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:18,595 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/C, priority=13, startTime=1732303338384; duration=0sec 2024-11-22T19:22:18,595 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:18,595 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:C 2024-11-22T19:22:18,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741951_1127 (size=12301) 2024-11-22T19:22:18,624 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=513 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/56a8874616854397888b87d56932b65e 2024-11-22T19:22:18,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/84a85a59bae645f2b8d06f16ef0af3bd is 50, key is test_row_0/C:col10/1732303337370/Put/seqid=0 2024-11-22T19:22:18,684 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:18,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 302 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303398683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:18,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741952_1128 (size=12301) 2024-11-22T19:22:18,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:18,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 304 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303398787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:18,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-22T19:22:18,999 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:18,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 306 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303398998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:19,087 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=513 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/84a85a59bae645f2b8d06f16ef0af3bd 2024-11-22T19:22:19,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/5508c4ea8e404fceb295feddf4a62ae5 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/5508c4ea8e404fceb295feddf4a62ae5 2024-11-22T19:22:19,116 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/5508c4ea8e404fceb295feddf4a62ae5, entries=150, sequenceid=513, filesize=12.0 K 2024-11-22T19:22:19,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/56a8874616854397888b87d56932b65e as 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/56a8874616854397888b87d56932b65e 2024-11-22T19:22:19,130 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/56a8874616854397888b87d56932b65e, entries=150, sequenceid=513, filesize=12.0 K 2024-11-22T19:22:19,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/84a85a59bae645f2b8d06f16ef0af3bd as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/84a85a59bae645f2b8d06f16ef0af3bd 2024-11-22T19:22:19,149 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/84a85a59bae645f2b8d06f16ef0af3bd, entries=150, sequenceid=513, filesize=12.0 K 2024-11-22T19:22:19,152 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 672ms, sequenceid=513, compaction requested=false 2024-11-22T19:22:19,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:19,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
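[editor's note] The RegionTooBusyException stack traces above are the server refusing writes while the region's memstore is over its blocking limit; the HBase client normally retries these internally, but a caller can also back off explicitly. A hedged sketch against the standard 2.x client API follows; the table, family, and row names are illustrative only, and depending on retry settings the client may surface the failure wrapped in a RetriesExhaustedWithDetailsException instead.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPut {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);          // may be rejected while the region is blocked
                    break;
                } catch (RegionTooBusyException e) {
                    Thread.sleep(backoffMs); // give the flush time to drain the memstore
                    backoffMs *= 2;          // exponential backoff between attempts
                }
            }
        }
    }
}
```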
2024-11-22T19:22:19,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-22T19:22:19,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-22T19:22:19,157 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-22T19:22:19,157 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2910 sec 2024-11-22T19:22:19,159 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.2960 sec 2024-11-22T19:22:19,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:19,304 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-22T19:22:19,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:19,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:19,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:19,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:19,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:19,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:19,314 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/dd9a3db2b3694fd2bab90e6d1979bcb3 is 50, key is test_row_0/A:col10/1732303338677/Put/seqid=0 2024-11-22T19:22:19,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741953_1129 (size=14741) 2024-11-22T19:22:19,365 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=537 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/dd9a3db2b3694fd2bab90e6d1979bcb3 2024-11-22T19:22:19,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:19,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 321 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303399363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:19,384 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/f413c9679db54e5abbee05a9919bac9e is 50, key is test_row_0/B:col10/1732303338677/Put/seqid=0 2024-11-22T19:22:19,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741954_1130 (size=12301) 2024-11-22T19:22:19,471 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:19,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 323 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303399470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:19,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:19,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 325 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303399677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:19,836 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=537 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/f413c9679db54e5abbee05a9919bac9e 2024-11-22T19:22:19,870 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/9ccb34fb81a7407cb0a6760e30f0eb85 is 50, key is test_row_0/C:col10/1732303338677/Put/seqid=0 2024-11-22T19:22:19,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741955_1131 (size=12301) 2024-11-22T19:22:19,917 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=537 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/9ccb34fb81a7407cb0a6760e30f0eb85 2024-11-22T19:22:19,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/dd9a3db2b3694fd2bab90e6d1979bcb3 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/dd9a3db2b3694fd2bab90e6d1979bcb3 2024-11-22T19:22:19,939 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/dd9a3db2b3694fd2bab90e6d1979bcb3, entries=200, sequenceid=537, filesize=14.4 K 2024-11-22T19:22:19,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/f413c9679db54e5abbee05a9919bac9e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/f413c9679db54e5abbee05a9919bac9e 2024-11-22T19:22:19,946 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/f413c9679db54e5abbee05a9919bac9e, entries=150, sequenceid=537, filesize=12.0 K 2024-11-22T19:22:19,948 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/9ccb34fb81a7407cb0a6760e30f0eb85 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/9ccb34fb81a7407cb0a6760e30f0eb85 2024-11-22T19:22:19,957 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/9ccb34fb81a7407cb0a6760e30f0eb85, entries=150, sequenceid=537, filesize=12.0 K 2024-11-22T19:22:19,959 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 655ms, sequenceid=537, compaction requested=true 2024-11-22T19:22:19,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:19,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:19,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:19,959 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:19,959 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:19,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:19,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:19,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:22:19,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:19,960 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40569 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:19,961 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 
686ebaaf5a8e3b2d28eef9abb3c2302e/A is initiating minor compaction (all files) 2024-11-22T19:22:19,961 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/A in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:19,961 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/bcdd9662c8664593bb7485a25c6b46cb, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/5508c4ea8e404fceb295feddf4a62ae5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/dd9a3db2b3694fd2bab90e6d1979bcb3] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=39.6 K 2024-11-22T19:22:19,961 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38129 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:19,961 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/B is initiating minor compaction (all files) 2024-11-22T19:22:19,961 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/B in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
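[editor's note] The "Over memstore limit=512.0 K" rejections earlier in this stretch are governed by the region flush size and its blocking multiplier; the unusually small 512 K figure suggests the test deliberately shrinks the flush size to force frequent flushes. A sketch of the relevant keys with their stock HBase 2.x names; the values here are illustrative (128 K x 4 = 512 K) and are not claimed to be what this test actually sets.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimits {
    public static Configuration memstoreConf() {
        Configuration conf = HBaseConfiguration.create();
        // Per-region memstore size that triggers a flush.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        // Writes are rejected with RegionTooBusyException once the memstore exceeds
        // flush.size * block.multiplier (128 K * 4 = 512 K with these values).
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
    }
}
```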
2024-11-22T19:22:19,961 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/079998da92c343678c98eb98523bbb97, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/56a8874616854397888b87d56932b65e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/f413c9679db54e5abbee05a9919bac9e] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=37.2 K 2024-11-22T19:22:19,962 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting bcdd9662c8664593bb7485a25c6b46cb, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=497, earliestPutTs=1732303336717 2024-11-22T19:22:19,962 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 079998da92c343678c98eb98523bbb97, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=497, earliestPutTs=1732303336717 2024-11-22T19:22:19,962 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5508c4ea8e404fceb295feddf4a62ae5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=513, earliestPutTs=1732303337370 2024-11-22T19:22:19,963 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 56a8874616854397888b87d56932b65e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=513, earliestPutTs=1732303337370 2024-11-22T19:22:19,963 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd9a3db2b3694fd2bab90e6d1979bcb3, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=537, earliestPutTs=1732303338670 2024-11-22T19:22:19,963 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting f413c9679db54e5abbee05a9919bac9e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=537, earliestPutTs=1732303338677 2024-11-22T19:22:19,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-22T19:22:19,976 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-22T19:22:19,977 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#A#compaction#117 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:19,977 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/b32db7f7869d42e582ea7b27130e1a17 is 50, key is test_row_0/A:col10/1732303338677/Put/seqid=0 2024-11-22T19:22:19,978 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:22:19,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-11-22T19:22:19,980 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:22:19,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-22T19:22:19,981 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:22:19,981 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:22:19,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:19,984 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-22T19:22:19,984 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:19,984 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:19,984 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:19,984 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:19,984 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:19,984 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:19,992 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#B#compaction#118 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:19,993 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/2700918f3a304b3fafb7c587f90ddf1e is 50, key is test_row_0/B:col10/1732303338677/Put/seqid=0 2024-11-22T19:22:20,004 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/4c8bd770f1de4cb6a1effe593f827023 is 50, key is test_row_0/A:col10/1732303339982/Put/seqid=0 2024-11-22T19:22:20,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741956_1132 (size=13629) 2024-11-22T19:22:20,067 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/b32db7f7869d42e582ea7b27130e1a17 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/b32db7f7869d42e582ea7b27130e1a17 2024-11-22T19:22:20,077 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/A of 686ebaaf5a8e3b2d28eef9abb3c2302e into b32db7f7869d42e582ea7b27130e1a17(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:20,077 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:20,077 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/A, priority=13, startTime=1732303339959; duration=0sec 2024-11-22T19:22:20,077 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:20,077 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:A 2024-11-22T19:22:20,077 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:20,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741957_1133 (size=13629) 2024-11-22T19:22:20,079 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38129 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:20,080 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/C is initiating minor compaction (all files) 2024-11-22T19:22:20,080 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/C in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:20,080 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/d17a79a8ba9c45aca44c37e9d083c38e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/84a85a59bae645f2b8d06f16ef0af3bd, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/9ccb34fb81a7407cb0a6760e30f0eb85] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=37.2 K 2024-11-22T19:22:20,080 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d17a79a8ba9c45aca44c37e9d083c38e, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=497, earliestPutTs=1732303336717 2024-11-22T19:22:20,081 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84a85a59bae645f2b8d06f16ef0af3bd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=513, earliestPutTs=1732303337370 2024-11-22T19:22:20,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-22T19:22:20,082 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ccb34fb81a7407cb0a6760e30f0eb85, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=537, earliestPutTs=1732303338677 2024-11-22T19:22:20,086 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/2700918f3a304b3fafb7c587f90ddf1e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/2700918f3a304b3fafb7c587f90ddf1e 2024-11-22T19:22:20,100 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/B of 686ebaaf5a8e3b2d28eef9abb3c2302e into 2700918f3a304b3fafb7c587f90ddf1e(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:20,100 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:20,100 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/B, priority=13, startTime=1732303339959; duration=0sec 2024-11-22T19:22:20,100 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:20,100 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:B 2024-11-22T19:22:20,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741958_1134 (size=12301) 2024-11-22T19:22:20,107 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#C#compaction#120 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:20,108 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/99a99506937a46f6acf6fb22f7922455 is 50, key is test_row_0/C:col10/1732303338677/Put/seqid=0 2024-11-22T19:22:20,110 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=553 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/4c8bd770f1de4cb6a1effe593f827023 2024-11-22T19:22:20,121 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/d3359da25a77408192f6d9de037532e7 is 50, key is test_row_0/B:col10/1732303339982/Put/seqid=0 2024-11-22T19:22:20,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:20,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 345 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303400123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:20,133 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:20,134 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-22T19:22:20,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:20,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:20,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:20,134 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:20,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:20,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:20,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741959_1135 (size=13629) 2024-11-22T19:22:20,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741960_1136 (size=12301) 2024-11-22T19:22:20,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:20,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 347 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303400231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:20,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-22T19:22:20,287 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:20,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-22T19:22:20,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:20,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
as already flushing 2024-11-22T19:22:20,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:20,288 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:20,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:20,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:20,439 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:20,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 349 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303400438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:20,441 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:20,441 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-22T19:22:20,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:20,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:20,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:20,442 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:20,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:20,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:20,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-22T19:22:20,595 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/99a99506937a46f6acf6fb22f7922455 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/99a99506937a46f6acf6fb22f7922455 2024-11-22T19:22:20,601 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:20,601 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-22T19:22:20,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:20,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:20,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:20,602 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:20,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:20,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:20,605 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/C of 686ebaaf5a8e3b2d28eef9abb3c2302e into 99a99506937a46f6acf6fb22f7922455(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:22:20,605 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:20,605 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/C, priority=13, startTime=1732303339959; duration=0sec 2024-11-22T19:22:20,605 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:20,605 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:C 2024-11-22T19:22:20,607 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=553 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/d3359da25a77408192f6d9de037532e7 2024-11-22T19:22:20,618 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/2006de7059734db9b72f568d72f44e17 is 50, key is test_row_0/C:col10/1732303339982/Put/seqid=0 2024-11-22T19:22:20,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741961_1137 (size=12301) 2024-11-22T19:22:20,643 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=553 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/2006de7059734db9b72f568d72f44e17 2024-11-22T19:22:20,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/4c8bd770f1de4cb6a1effe593f827023 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/4c8bd770f1de4cb6a1effe593f827023 2024-11-22T19:22:20,659 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/4c8bd770f1de4cb6a1effe593f827023, entries=150, sequenceid=553, filesize=12.0 K 
2024-11-22T19:22:20,661 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/d3359da25a77408192f6d9de037532e7 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d3359da25a77408192f6d9de037532e7 2024-11-22T19:22:20,669 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d3359da25a77408192f6d9de037532e7, entries=150, sequenceid=553, filesize=12.0 K 2024-11-22T19:22:20,671 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/2006de7059734db9b72f568d72f44e17 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/2006de7059734db9b72f568d72f44e17 2024-11-22T19:22:20,681 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/2006de7059734db9b72f568d72f44e17, entries=150, sequenceid=553, filesize=12.0 K 2024-11-22T19:22:20,686 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 702ms, sequenceid=553, compaction requested=false 2024-11-22T19:22:20,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:20,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:20,747 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-22T19:22:20,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:20,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:20,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:20,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:20,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:20,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:20,754 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:20,755 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote 
procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-22T19:22:20,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:20,755 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/c764919fb1744319b1742f4c86265533 is 50, key is test_row_0/A:col10/1732303340743/Put/seqid=0 2024-11-22T19:22:20,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:20,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:20,756 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:20,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:20,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:20,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741962_1138 (size=14741) 2024-11-22T19:22:20,783 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=577 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/c764919fb1744319b1742f4c86265533 2024-11-22T19:22:20,791 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:20,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 364 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303400790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:20,803 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/b0ad8f80569248dcb7e43e45f6d8bd2d is 50, key is test_row_0/B:col10/1732303340743/Put/seqid=0 2024-11-22T19:22:20,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741963_1139 (size=12301) 2024-11-22T19:22:20,847 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=577 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/b0ad8f80569248dcb7e43e45f6d8bd2d 2024-11-22T19:22:20,871 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/7ee2443c1a7c48c4bb3edf376e4c2fdc is 50, key is test_row_0/C:col10/1732303340743/Put/seqid=0 2024-11-22T19:22:20,895 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:20,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 366 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47258 deadline: 1732303400892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:20,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741964_1140 (size=12301) 2024-11-22T19:22:20,898 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=577 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/7ee2443c1a7c48c4bb3edf376e4c2fdc 2024-11-22T19:22:20,911 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:20,912 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-22T19:22:20,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:20,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. as already flushing 2024-11-22T19:22:20,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:20,913 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:20,914 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/c764919fb1744319b1742f4c86265533 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/c764919fb1744319b1742f4c86265533 2024-11-22T19:22:20,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:20,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:20,923 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/c764919fb1744319b1742f4c86265533, entries=200, sequenceid=577, filesize=14.4 K 2024-11-22T19:22:20,924 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/b0ad8f80569248dcb7e43e45f6d8bd2d as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/b0ad8f80569248dcb7e43e45f6d8bd2d 2024-11-22T19:22:20,930 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/b0ad8f80569248dcb7e43e45f6d8bd2d, entries=150, sequenceid=577, filesize=12.0 K 2024-11-22T19:22:20,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/7ee2443c1a7c48c4bb3edf376e4c2fdc as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/7ee2443c1a7c48c4bb3edf376e4c2fdc 2024-11-22T19:22:20,939 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/7ee2443c1a7c48c4bb3edf376e4c2fdc, entries=150, sequenceid=577, filesize=12.0 K 
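The RegionTooBusyException warnings above ("Over memstore limit=512.0 K, regionName=686ebaaf5a8e3b2d28eef9abb3c2302e") are the region server refusing writes while its memstore drains to disk; the test tool drives its writers hard enough to hit that limit deliberately. An ordinary client would simply back off and retry. A minimal sketch of that pattern against the public HBase client API follows; the table, family, and row names are taken from the log, the retry and backoff numbers are arbitrary, and depending on the client's own retry settings the exception may arrive wrapped rather than directly.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;                 // arbitrary starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break;                        // write accepted
                } catch (IOException e) {
                    // The region is pushing back while its memstore flushes; wait and retry.
                    boolean busy = e instanceof RegionTooBusyException
                            || e.getCause() instanceof RegionTooBusyException;
                    if (!busy || attempt == 5) {
                        throw e;                  // a different failure, or out of attempts
                    }
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;               // simple exponential backoff
                }
            }
        }
    }
}
```

The 512 K blocking limit in this run is far below the usual default, which suggests the test configures a small memstore on purpose; in a normal deployment the block point is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier.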
2024-11-22T19:22:20,941 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 194ms, sequenceid=577, compaction requested=true 2024-11-22T19:22:20,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:20,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:20,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:20,941 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:20,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:20,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:20,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 686ebaaf5a8e3b2d28eef9abb3c2302e:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:22:20,941 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:20,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:20,942 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40671 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:20,942 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38231 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:20,942 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/A is initiating minor compaction (all files) 2024-11-22T19:22:20,943 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/A in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:20,943 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/b32db7f7869d42e582ea7b27130e1a17, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/4c8bd770f1de4cb6a1effe593f827023, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/c764919fb1744319b1742f4c86265533] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=39.7 K 2024-11-22T19:22:20,943 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b32db7f7869d42e582ea7b27130e1a17, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=537, earliestPutTs=1732303338677 2024-11-22T19:22:20,944 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c8bd770f1de4cb6a1effe593f827023, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=553, earliestPutTs=1732303339349 2024-11-22T19:22:20,944 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/B is initiating minor compaction (all files) 2024-11-22T19:22:20,944 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/B in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:20,944 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting c764919fb1744319b1742f4c86265533, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=577, earliestPutTs=1732303340084 2024-11-22T19:22:20,944 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/2700918f3a304b3fafb7c587f90ddf1e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d3359da25a77408192f6d9de037532e7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/b0ad8f80569248dcb7e43e45f6d8bd2d] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=37.3 K 2024-11-22T19:22:20,945 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2700918f3a304b3fafb7c587f90ddf1e, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=537, earliestPutTs=1732303338677 2024-11-22T19:22:20,946 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting d3359da25a77408192f6d9de037532e7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=553, earliestPutTs=1732303339349 2024-11-22T19:22:20,946 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting b0ad8f80569248dcb7e43e45f6d8bd2d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=577, earliestPutTs=1732303340114 2024-11-22T19:22:20,952 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6502d571 to 127.0.0.1:57120 2024-11-22T19:22:20,952 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:22:20,954 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4b5cad1a to 127.0.0.1:57120 2024-11-22T19:22:20,954 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:22:20,955 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x767a8485 to 127.0.0.1:57120 2024-11-22T19:22:20,956 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:22:20,956 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c3b736e to 127.0.0.1:57120 2024-11-22T19:22:20,956 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:22:20,966 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#A#compaction#126 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:20,967 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/d99ccdabca4c4e69ab8a9a27a7c2430e is 50, key is test_row_0/A:col10/1732303340743/Put/seqid=0 2024-11-22T19:22:20,969 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#B#compaction#127 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:20,971 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/e6e10d44a1954640883a2523ac77d671 is 50, key is test_row_0/B:col10/1732303340743/Put/seqid=0 2024-11-22T19:22:20,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741965_1141 (size=13731) 2024-11-22T19:22:20,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741966_1142 (size=13731) 2024-11-22T19:22:21,067 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:21,068 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-22T19:22:21,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:21,068 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-22T19:22:21,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:21,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:21,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:21,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:21,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:21,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:21,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/068ab461cfcc4f26bfcc731289a52dd0 is 50, key is test_row_0/A:col10/1732303340761/Put/seqid=0 2024-11-22T19:22:21,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741967_1143 (size=12301) 2024-11-22T19:22:21,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-22T19:22:21,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:21,099 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
as already flushing 2024-11-22T19:22:21,100 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79d38d10 to 127.0.0.1:57120 2024-11-22T19:22:21,100 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:22:21,115 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x736f1673 to 127.0.0.1:57120 2024-11-22T19:22:21,116 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:22:21,140 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3f34ff67 to 127.0.0.1:57120 2024-11-22T19:22:21,140 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:22:21,141 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4ee2166f to 127.0.0.1:57120 2024-11-22T19:22:21,141 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:22:21,157 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6c63ae4e to 127.0.0.1:57120 2024-11-22T19:22:21,157 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:22:21,398 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/d99ccdabca4c4e69ab8a9a27a7c2430e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/d99ccdabca4c4e69ab8a9a27a7c2430e 2024-11-22T19:22:21,398 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/e6e10d44a1954640883a2523ac77d671 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/e6e10d44a1954640883a2523ac77d671 2024-11-22T19:22:21,405 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/A of 686ebaaf5a8e3b2d28eef9abb3c2302e into d99ccdabca4c4e69ab8a9a27a7c2430e(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:22:21,405 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:21,405 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/A, priority=13, startTime=1732303340941; duration=0sec 2024-11-22T19:22:21,405 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/B of 686ebaaf5a8e3b2d28eef9abb3c2302e into e6e10d44a1954640883a2523ac77d671(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
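The CompactingMemStore "FLUSHING TO DISK" lines above and the earlier "Over memstore limit" warnings are governed by a few region-level settings. A minimal configuration sketch is below; the values shown are the usual defaults for illustration, not the ones this test run used, and the property names are the HBase 2.x ones, so they are worth double-checking against the release in use.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Size at which a region's memstore is flushed to new HFiles (~128 MB by default).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);

        // Writes are rejected with RegionTooBusyException once the memstore grows past
        // flush.size * block.multiplier -- the "Over memstore limit" seen in this log.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        // The "CompactingMemStore ... FLUSHING TO DISK" entries come from in-memory
        // compaction; NONE/BASIC/EAGER select its policy.
        conf.set("hbase.hregion.compacting.memstore.type", "BASIC");

        System.out.println("effective flush size: "
                + conf.getLong("hbase.hregion.memstore.flush.size", -1));
    }
}
```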
2024-11-22T19:22:21,405 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:21,406 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/B, priority=13, startTime=1732303340941; duration=0sec 2024-11-22T19:22:21,406 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:21,406 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:A 2024-11-22T19:22:21,406 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:21,406 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:B 2024-11-22T19:22:21,406 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:21,407 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38231 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:21,407 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 686ebaaf5a8e3b2d28eef9abb3c2302e/C is initiating minor compaction (all files) 2024-11-22T19:22:21,407 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 686ebaaf5a8e3b2d28eef9abb3c2302e/C in TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:21,407 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/99a99506937a46f6acf6fb22f7922455, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/2006de7059734db9b72f568d72f44e17, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/7ee2443c1a7c48c4bb3edf376e4c2fdc] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp, totalSize=37.3 K 2024-11-22T19:22:21,408 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99a99506937a46f6acf6fb22f7922455, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=537, earliestPutTs=1732303338677 2024-11-22T19:22:21,408 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2006de7059734db9b72f568d72f44e17, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=553, earliestPutTs=1732303339349 2024-11-22T19:22:21,409 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7ee2443c1a7c48c4bb3edf376e4c2fdc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=577, earliestPutTs=1732303340114 2024-11-22T19:22:21,427 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 686ebaaf5a8e3b2d28eef9abb3c2302e#C#compaction#129 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:21,428 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/a7e630814c6d4ca1acc59da78ca57045 is 50, key is test_row_0/C:col10/1732303340743/Put/seqid=0 2024-11-22T19:22:21,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741968_1144 (size=13731) 2024-11-22T19:22:21,483 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=592 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/068ab461cfcc4f26bfcc731289a52dd0 2024-11-22T19:22:21,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/2c6a1d5359c94289bff44d6156bd6971 is 50, key is test_row_0/B:col10/1732303340761/Put/seqid=0 2024-11-22T19:22:21,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741969_1145 (size=12301) 2024-11-22T19:22:21,856 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/a7e630814c6d4ca1acc59da78ca57045 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/a7e630814c6d4ca1acc59da78ca57045 2024-11-22T19:22:21,864 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 686ebaaf5a8e3b2d28eef9abb3c2302e/C of 686ebaaf5a8e3b2d28eef9abb3c2302e into a7e630814c6d4ca1acc59da78ca57045(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
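The compactions above were queued by the MemStoreFlusher right after the flush, and the ExploringCompactionPolicy selected all three eligible files per store for a minor rewrite. A client can also queue the same kind of work explicitly through the Admin API; a short sketch follows, reusing the table and family names from the log (these requests are asynchronous on the server side, just like the system-requested ones here).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompactionExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName tn = TableName.valueOf("TestAcidGuarantees");

            // Queue a minor compaction of one column family, the same kind of work
            // the MemStoreFlusher requested for stores A, B and C above.
            admin.compact(tn, Bytes.toBytes("A"));

            // Or rewrite every store file in the table in a single major compaction.
            admin.majorCompact(tn);
        }
    }
}
```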
2024-11-22T19:22:21,864 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:21,864 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e., storeName=686ebaaf5a8e3b2d28eef9abb3c2302e/C, priority=13, startTime=1732303340941; duration=0sec 2024-11-22T19:22:21,864 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:21,864 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 686ebaaf5a8e3b2d28eef9abb3c2302e:C 2024-11-22T19:22:21,900 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=592 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/2c6a1d5359c94289bff44d6156bd6971 2024-11-22T19:22:21,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/21da0b438b994e5da1694902a4942e38 is 50, key is test_row_0/C:col10/1732303340761/Put/seqid=0 2024-11-22T19:22:21,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741970_1146 (size=12301) 2024-11-22T19:22:22,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-22T19:22:22,323 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=592 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/21da0b438b994e5da1694902a4942e38 2024-11-22T19:22:22,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/068ab461cfcc4f26bfcc731289a52dd0 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/068ab461cfcc4f26bfcc731289a52dd0 2024-11-22T19:22:22,336 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/068ab461cfcc4f26bfcc731289a52dd0, entries=150, sequenceid=592, filesize=12.0 K 2024-11-22T19:22:22,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 
{event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/2c6a1d5359c94289bff44d6156bd6971 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/2c6a1d5359c94289bff44d6156bd6971 2024-11-22T19:22:22,342 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/2c6a1d5359c94289bff44d6156bd6971, entries=150, sequenceid=592, filesize=12.0 K 2024-11-22T19:22:22,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/21da0b438b994e5da1694902a4942e38 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/21da0b438b994e5da1694902a4942e38 2024-11-22T19:22:22,348 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/21da0b438b994e5da1694902a4942e38, entries=150, sequenceid=592, filesize=12.0 K 2024-11-22T19:22:22,349 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=33.54 KB/34350 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 1281ms, sequenceid=592, compaction requested=false 2024-11-22T19:22:22,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:22,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
2024-11-22T19:22:22,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-22T19:22:22,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-11-22T19:22:22,353 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-22T19:22:22,353 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3700 sec 2024-11-22T19:22:22,355 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 2.3760 sec 2024-11-22T19:22:22,400 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T19:22:24,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-22T19:22:24,087 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-22T19:22:24,087 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-22T19:22:24,087 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 220 2024-11-22T19:22:24,087 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 53 2024-11-22T19:22:24,087 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 54 2024-11-22T19:22:24,087 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70 2024-11-22T19:22:24,087 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70 2024-11-22T19:22:24,087 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-22T19:22:24,087 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3899 2024-11-22T19:22:24,087 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3748 2024-11-22T19:22:24,087 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-22T19:22:24,087 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1722 2024-11-22T19:22:24,087 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5166 rows 2024-11-22T19:22:24,087 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1744 2024-11-22T19:22:24,087 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5221 rows 2024-11-22T19:22:24,087 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-22T19:22:24,087 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7fdf5682 to 127.0.0.1:57120 2024-11-22T19:22:24,087 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:22:24,095 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-22T19:22:24,099 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 
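The AcidGuaranteesTestTool summary above reports what the concurrent writers, getters, and scanners saw during the run. Roughly, the invariant being checked is that every cell read back within one row carries the same value, because each writer updates all three families of a row in a single atomic mutation. The sketch below is not the tool's actual reader code, just a minimal get-based version of that check using the row and table names from the log.

```java
import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowAtomicityCheck {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Result row = table.get(new Get(Bytes.toBytes("test_row_0")));
            Cell[] cells = row.rawCells();
            if (cells == null || cells.length == 0) {
                System.out.println("row not found");
                return;
            }
            byte[] first = CellUtil.cloneValue(cells[0]);
            for (Cell cell : cells) {
                if (!Arrays.equals(first, CellUtil.cloneValue(cell))) {
                    // A mixed row would mean the get observed a half-applied multi-family write.
                    throw new IllegalStateException("row is not internally consistent");
                }
            }
            System.out.println("row consistent across " + cells.length + " cells");
        }
    }
}
```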
2024-11-22T19:22:24,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-22T19:22:24,108 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303344107"}]},"ts":"1732303344107"} 2024-11-22T19:22:24,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-22T19:22:24,109 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-22T19:22:24,113 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-22T19:22:24,115 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-22T19:22:24,120 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=686ebaaf5a8e3b2d28eef9abb3c2302e, UNASSIGN}] 2024-11-22T19:22:24,121 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=686ebaaf5a8e3b2d28eef9abb3c2302e, UNASSIGN 2024-11-22T19:22:24,121 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=686ebaaf5a8e3b2d28eef9abb3c2302e, regionState=CLOSING, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:22:24,122 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T19:22:24,123 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; CloseRegionProcedure 686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657}] 2024-11-22T19:22:24,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-22T19:22:24,278 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:24,280 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(124): Close 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:24,280 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-22T19:22:24,281 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1681): Closing 686ebaaf5a8e3b2d28eef9abb3c2302e, disabling compactions & flushes 2024-11-22T19:22:24,281 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
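The DisableTableProcedure and the region close that follow are the server side of the teardown the client started with "disable TestAcidGuarantees"; from the client it is just a couple of Admin calls. A minimal sketch using the table name from the log is below; whether this particular run deletes the table afterwards is not visible in this excerpt, so the deleteTable step is only the usual follow-up.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName tn = TableName.valueOf("TestAcidGuarantees");
            if (admin.tableExists(tn)) {
                if (!admin.isTableDisabled(tn)) {
                    admin.disableTable(tn);   // unassigns the region, as in the log above
                }
                admin.deleteTable(tn);        // then removes the table's metadata and data
            }
        }
    }
}
```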
2024-11-22T19:22:24,281 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:24,281 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. after waiting 0 ms 2024-11-22T19:22:24,281 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 2024-11-22T19:22:24,281 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(2837): Flushing 686ebaaf5a8e3b2d28eef9abb3c2302e 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-22T19:22:24,282 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=A 2024-11-22T19:22:24,282 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:24,282 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=B 2024-11-22T19:22:24,282 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:24,282 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 686ebaaf5a8e3b2d28eef9abb3c2302e, store=C 2024-11-22T19:22:24,282 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:24,288 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/e0dc53652eb5431c996716e7f74794b5 is 50, key is test_row_0/A:col10/1732303341156/Put/seqid=0 2024-11-22T19:22:24,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741971_1147 (size=12301) 2024-11-22T19:22:24,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-22T19:22:24,695 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=603 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/e0dc53652eb5431c996716e7f74794b5 2024-11-22T19:22:24,704 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, 
pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/75ee36057f44460a9d161701baba9bda is 50, key is test_row_0/B:col10/1732303341156/Put/seqid=0 2024-11-22T19:22:24,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-22T19:22:24,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741972_1148 (size=12301) 2024-11-22T19:22:25,117 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=603 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/75ee36057f44460a9d161701baba9bda 2024-11-22T19:22:25,127 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/bccfd48ad82746a69798cb4d035620ef is 50, key is test_row_0/C:col10/1732303341156/Put/seqid=0 2024-11-22T19:22:25,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741973_1149 (size=12301) 2024-11-22T19:22:25,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-22T19:22:25,538 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=603 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/bccfd48ad82746a69798cb4d035620ef 2024-11-22T19:22:25,544 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/A/e0dc53652eb5431c996716e7f74794b5 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/e0dc53652eb5431c996716e7f74794b5 2024-11-22T19:22:25,551 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/e0dc53652eb5431c996716e7f74794b5, entries=150, sequenceid=603, filesize=12.0 K 2024-11-22T19:22:25,552 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/B/75ee36057f44460a9d161701baba9bda as 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/75ee36057f44460a9d161701baba9bda 2024-11-22T19:22:25,557 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/75ee36057f44460a9d161701baba9bda, entries=150, sequenceid=603, filesize=12.0 K 2024-11-22T19:22:25,558 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/.tmp/C/bccfd48ad82746a69798cb4d035620ef as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/bccfd48ad82746a69798cb4d035620ef 2024-11-22T19:22:25,564 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/bccfd48ad82746a69798cb4d035620ef, entries=150, sequenceid=603, filesize=12.0 K 2024-11-22T19:22:25,568 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 686ebaaf5a8e3b2d28eef9abb3c2302e in 1283ms, sequenceid=603, compaction requested=true 2024-11-22T19:22:25,569 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/51f9d6864357482dbf0fdc53e6220187, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/e51942e0e6ca4a84b6d7040a6c886040, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/b26d8f3e9620497f88428d849f951667, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/c383e839e20c430bac38936c99c1a29b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/5d289ca866e340a28aee534eb4dc2a42, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/f6a7fbf48aa245a9ad02eaef757f14cc, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/3c5cde39fe14405eabe7eaa5a7971529, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/742a704aeb9746709733d1e9e0d5ef69, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/db768bd5280e4d0082557f6a6ec7c2cf, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/ee56c44646ec47e587d59d32c8317433, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/ebefe97f22b744cdb221c7a7cacd8f0a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/9d729f718e8c403f9c42dd9a84b7719c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/6e403b48054c49dba713c862d28b1305, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/c74fec59a6614e84a50189803e322f01, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/3b6aca3e60a44bc6a45bc3686779e356, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/7a8d077566c844d2b5e65cd29c6481d2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/52d2351088ac495eb10fbe8200dac861, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/b72dac42c02f4b1d8082a37bebc21fd7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/f19576bde23f4d889f5e84320b1c370c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/6de95a2223bc41448650aa286d8fa93e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/f9a8e636756e4a0a909d11968063cc77, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/39b9a003981d4a839bdde58bc5c030b4, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/97e142d716dd475598af946c557c922d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/0f61ca35413445bdabd1e3cf5a6929c6, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/6caf296b355045d0953ce9744c8b46d4, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/0edc6979b105441d9e797a6a6fe63d93, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/2cd527e559a34db78cec016e5635c5ee, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/f6977b17a2034152b85cc0d27e944e73, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/53e72d3d82544fb3aaf6ceb71895c1e2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/49df3da54eab4e1294b3f9ded35f3a9c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/6fe5a469359445c08ce28e3574f4783f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/edcb543d63d14d59aacb3ad8bb75536a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/cd022d0976474bd6b12399462dfd110a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/d2dfa40889da4625aed515ebcadccd22, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/79ab4f6311fd48e6b9e9c8bd2c2ac9a6, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/e730795fedc14152b4641cdbb564d47e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/bcdd9662c8664593bb7485a25c6b46cb, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/5508c4ea8e404fceb295feddf4a62ae5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/dd9a3db2b3694fd2bab90e6d1979bcb3, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/b32db7f7869d42e582ea7b27130e1a17, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/4c8bd770f1de4cb6a1effe593f827023, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/c764919fb1744319b1742f4c86265533] to archive 2024-11-22T19:22:25,574 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-22T19:22:25,581 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/51f9d6864357482dbf0fdc53e6220187 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/51f9d6864357482dbf0fdc53e6220187 2024-11-22T19:22:25,583 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/e51942e0e6ca4a84b6d7040a6c886040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/e51942e0e6ca4a84b6d7040a6c886040 2024-11-22T19:22:25,585 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/b26d8f3e9620497f88428d849f951667 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/b26d8f3e9620497f88428d849f951667 2024-11-22T19:22:25,587 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/c383e839e20c430bac38936c99c1a29b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/c383e839e20c430bac38936c99c1a29b 2024-11-22T19:22:25,589 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/5d289ca866e340a28aee534eb4dc2a42 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/5d289ca866e340a28aee534eb4dc2a42 2024-11-22T19:22:25,590 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/f6a7fbf48aa245a9ad02eaef757f14cc to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/f6a7fbf48aa245a9ad02eaef757f14cc 2024-11-22T19:22:25,592 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/3c5cde39fe14405eabe7eaa5a7971529 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/3c5cde39fe14405eabe7eaa5a7971529 2024-11-22T19:22:25,594 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/742a704aeb9746709733d1e9e0d5ef69 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/742a704aeb9746709733d1e9e0d5ef69 2024-11-22T19:22:25,596 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/db768bd5280e4d0082557f6a6ec7c2cf to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/db768bd5280e4d0082557f6a6ec7c2cf 2024-11-22T19:22:25,598 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/ee56c44646ec47e587d59d32c8317433 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/ee56c44646ec47e587d59d32c8317433 2024-11-22T19:22:25,599 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/ebefe97f22b744cdb221c7a7cacd8f0a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/ebefe97f22b744cdb221c7a7cacd8f0a 2024-11-22T19:22:25,601 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/9d729f718e8c403f9c42dd9a84b7719c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/9d729f718e8c403f9c42dd9a84b7719c 2024-11-22T19:22:25,603 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/6e403b48054c49dba713c862d28b1305 to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/6e403b48054c49dba713c862d28b1305 2024-11-22T19:22:25,604 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/c74fec59a6614e84a50189803e322f01 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/c74fec59a6614e84a50189803e322f01 2024-11-22T19:22:25,606 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/3b6aca3e60a44bc6a45bc3686779e356 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/3b6aca3e60a44bc6a45bc3686779e356 2024-11-22T19:22:25,608 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/7a8d077566c844d2b5e65cd29c6481d2 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/7a8d077566c844d2b5e65cd29c6481d2 2024-11-22T19:22:25,611 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/52d2351088ac495eb10fbe8200dac861 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/52d2351088ac495eb10fbe8200dac861 2024-11-22T19:22:25,613 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/b72dac42c02f4b1d8082a37bebc21fd7 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/b72dac42c02f4b1d8082a37bebc21fd7 2024-11-22T19:22:25,614 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/f19576bde23f4d889f5e84320b1c370c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/f19576bde23f4d889f5e84320b1c370c 2024-11-22T19:22:25,617 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/6de95a2223bc41448650aa286d8fa93e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/6de95a2223bc41448650aa286d8fa93e 2024-11-22T19:22:25,619 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/f9a8e636756e4a0a909d11968063cc77 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/f9a8e636756e4a0a909d11968063cc77 2024-11-22T19:22:25,620 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/39b9a003981d4a839bdde58bc5c030b4 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/39b9a003981d4a839bdde58bc5c030b4 2024-11-22T19:22:25,623 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/97e142d716dd475598af946c557c922d to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/97e142d716dd475598af946c557c922d 2024-11-22T19:22:25,624 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/0f61ca35413445bdabd1e3cf5a6929c6 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/0f61ca35413445bdabd1e3cf5a6929c6 2024-11-22T19:22:25,626 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/6caf296b355045d0953ce9744c8b46d4 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/6caf296b355045d0953ce9744c8b46d4 2024-11-22T19:22:25,629 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/0edc6979b105441d9e797a6a6fe63d93 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/0edc6979b105441d9e797a6a6fe63d93 2024-11-22T19:22:25,630 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/2cd527e559a34db78cec016e5635c5ee to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/2cd527e559a34db78cec016e5635c5ee 2024-11-22T19:22:25,632 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/f6977b17a2034152b85cc0d27e944e73 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/f6977b17a2034152b85cc0d27e944e73 2024-11-22T19:22:25,634 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/53e72d3d82544fb3aaf6ceb71895c1e2 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/53e72d3d82544fb3aaf6ceb71895c1e2 2024-11-22T19:22:25,636 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/49df3da54eab4e1294b3f9ded35f3a9c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/49df3da54eab4e1294b3f9ded35f3a9c 2024-11-22T19:22:25,638 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/6fe5a469359445c08ce28e3574f4783f to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/6fe5a469359445c08ce28e3574f4783f 2024-11-22T19:22:25,639 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/edcb543d63d14d59aacb3ad8bb75536a to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/edcb543d63d14d59aacb3ad8bb75536a 2024-11-22T19:22:25,642 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/cd022d0976474bd6b12399462dfd110a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/cd022d0976474bd6b12399462dfd110a 2024-11-22T19:22:25,644 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/d2dfa40889da4625aed515ebcadccd22 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/d2dfa40889da4625aed515ebcadccd22 2024-11-22T19:22:25,646 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/79ab4f6311fd48e6b9e9c8bd2c2ac9a6 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/79ab4f6311fd48e6b9e9c8bd2c2ac9a6 2024-11-22T19:22:25,648 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/e730795fedc14152b4641cdbb564d47e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/e730795fedc14152b4641cdbb564d47e 2024-11-22T19:22:25,650 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/bcdd9662c8664593bb7485a25c6b46cb to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/bcdd9662c8664593bb7485a25c6b46cb 2024-11-22T19:22:25,651 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/5508c4ea8e404fceb295feddf4a62ae5 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/5508c4ea8e404fceb295feddf4a62ae5 2024-11-22T19:22:25,652 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/dd9a3db2b3694fd2bab90e6d1979bcb3 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/dd9a3db2b3694fd2bab90e6d1979bcb3 2024-11-22T19:22:25,654 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/b32db7f7869d42e582ea7b27130e1a17 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/b32db7f7869d42e582ea7b27130e1a17 2024-11-22T19:22:25,655 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/4c8bd770f1de4cb6a1effe593f827023 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/4c8bd770f1de4cb6a1effe593f827023 2024-11-22T19:22:25,657 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/c764919fb1744319b1742f4c86265533 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/c764919fb1744319b1742f4c86265533 2024-11-22T19:22:25,674 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/264f8753a5ab44a8bdce804a619c54c1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/5c2fc7c78c5749949cc6ba32c7ec9ada, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/3cab44855ebf4504ad8651a3e9fb71e8, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/fb24d566f81d46969786685cae1b5b9c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d261caab355c453cb64b8e264615d782, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/03535eb77cd74d20b5cc6aef2e3c630a, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/780d7e2ac5e34d768ddf59d7801e891c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/2b457d32c99f4c5fbd6fcc52cb1635e5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/5419d037e70345588de36a0d6d3afd13, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/e7d41d805f084902b5d767389bf1f7ca, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/604b53015461438697085951c6cd5bba, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/ebe28ca992bc422ca016e42b22774933, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/10d26633a36240ceb1d3e8115e874a51, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/9f70c166a1cc4ef7a9bb34fa9208b152, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/4276f4ce807b410991af3913ea81d761, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/a6c7f94d9f604f458e8ff7249ebd1d52, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/4056dd1fb96449dc81dfa118684e5e7a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/71994200674a48658ad970f8390f64f2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/4be20bfb0b1d497b9106b1d7252fa021, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d37ca8d91c364bd3901d114700c30fe5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/17cdc321934947a1ab8e6e65d03a0658, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/cce343efc3cd4d9eba3383105838df4b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/1cedb4d1b2fd4acc82685a3714d688b4, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/8190bf0cebea4b96aa8a0b640e403b20, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/ec006918df36498ea733cc1d93e81a86, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/6524d685565344bb94d26f22beddcb9e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/3fe07008ca61475bab7bfd96a142a420, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d22dbda0ff4745bfb4358f2299036f02, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/fce5bc871bc6481c994ba7f345273e18, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/b2a84e4a65054fdb83b4ac20975cceb5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/0cd0e511a6734eedb2e2544428b3885d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/21874b59375449358dfa5412ea80c4fe, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/702fe8e554374f86be8cc967be75b21d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/9b512660de5349979605e08f486a4a11, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/cef5713c7e1e4188892aa0bf74980bd4, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/079998da92c343678c98eb98523bbb97, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/3a91cad7ffed42e4a3e5387fa5a03847, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/56a8874616854397888b87d56932b65e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/2700918f3a304b3fafb7c587f90ddf1e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/f413c9679db54e5abbee05a9919bac9e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d3359da25a77408192f6d9de037532e7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/b0ad8f80569248dcb7e43e45f6d8bd2d] to archive 2024-11-22T19:22:25,676 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-22T19:22:25,678 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/264f8753a5ab44a8bdce804a619c54c1 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/264f8753a5ab44a8bdce804a619c54c1 2024-11-22T19:22:25,680 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/5c2fc7c78c5749949cc6ba32c7ec9ada to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/5c2fc7c78c5749949cc6ba32c7ec9ada 2024-11-22T19:22:25,682 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/3cab44855ebf4504ad8651a3e9fb71e8 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/3cab44855ebf4504ad8651a3e9fb71e8 2024-11-22T19:22:25,683 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/fb24d566f81d46969786685cae1b5b9c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/fb24d566f81d46969786685cae1b5b9c 2024-11-22T19:22:25,685 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d261caab355c453cb64b8e264615d782 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d261caab355c453cb64b8e264615d782 2024-11-22T19:22:25,686 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/03535eb77cd74d20b5cc6aef2e3c630a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/03535eb77cd74d20b5cc6aef2e3c630a 2024-11-22T19:22:25,688 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/780d7e2ac5e34d768ddf59d7801e891c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/780d7e2ac5e34d768ddf59d7801e891c 2024-11-22T19:22:25,690 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/2b457d32c99f4c5fbd6fcc52cb1635e5 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/2b457d32c99f4c5fbd6fcc52cb1635e5 2024-11-22T19:22:25,691 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/5419d037e70345588de36a0d6d3afd13 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/5419d037e70345588de36a0d6d3afd13 2024-11-22T19:22:25,693 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/e7d41d805f084902b5d767389bf1f7ca to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/e7d41d805f084902b5d767389bf1f7ca 2024-11-22T19:22:25,695 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/604b53015461438697085951c6cd5bba to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/604b53015461438697085951c6cd5bba 2024-11-22T19:22:25,697 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/ebe28ca992bc422ca016e42b22774933 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/ebe28ca992bc422ca016e42b22774933 2024-11-22T19:22:25,703 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/10d26633a36240ceb1d3e8115e874a51 to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/10d26633a36240ceb1d3e8115e874a51 2024-11-22T19:22:25,707 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/9f70c166a1cc4ef7a9bb34fa9208b152 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/9f70c166a1cc4ef7a9bb34fa9208b152 2024-11-22T19:22:25,710 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/4276f4ce807b410991af3913ea81d761 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/4276f4ce807b410991af3913ea81d761 2024-11-22T19:22:25,711 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/a6c7f94d9f604f458e8ff7249ebd1d52 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/a6c7f94d9f604f458e8ff7249ebd1d52 2024-11-22T19:22:25,714 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/4056dd1fb96449dc81dfa118684e5e7a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/4056dd1fb96449dc81dfa118684e5e7a 2024-11-22T19:22:25,715 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/71994200674a48658ad970f8390f64f2 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/71994200674a48658ad970f8390f64f2 2024-11-22T19:22:25,717 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/4be20bfb0b1d497b9106b1d7252fa021 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/4be20bfb0b1d497b9106b1d7252fa021 2024-11-22T19:22:25,724 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d37ca8d91c364bd3901d114700c30fe5 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d37ca8d91c364bd3901d114700c30fe5 2024-11-22T19:22:25,728 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/17cdc321934947a1ab8e6e65d03a0658 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/17cdc321934947a1ab8e6e65d03a0658 2024-11-22T19:22:25,731 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/cce343efc3cd4d9eba3383105838df4b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/cce343efc3cd4d9eba3383105838df4b 2024-11-22T19:22:25,736 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/1cedb4d1b2fd4acc82685a3714d688b4 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/1cedb4d1b2fd4acc82685a3714d688b4 2024-11-22T19:22:25,739 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/8190bf0cebea4b96aa8a0b640e403b20 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/8190bf0cebea4b96aa8a0b640e403b20 2024-11-22T19:22:25,743 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/ec006918df36498ea733cc1d93e81a86 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/ec006918df36498ea733cc1d93e81a86 2024-11-22T19:22:25,744 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/6524d685565344bb94d26f22beddcb9e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/6524d685565344bb94d26f22beddcb9e 2024-11-22T19:22:25,750 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/3fe07008ca61475bab7bfd96a142a420 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/3fe07008ca61475bab7bfd96a142a420 2024-11-22T19:22:25,752 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d22dbda0ff4745bfb4358f2299036f02 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d22dbda0ff4745bfb4358f2299036f02 2024-11-22T19:22:25,754 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/fce5bc871bc6481c994ba7f345273e18 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/fce5bc871bc6481c994ba7f345273e18 2024-11-22T19:22:25,756 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/b2a84e4a65054fdb83b4ac20975cceb5 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/b2a84e4a65054fdb83b4ac20975cceb5 2024-11-22T19:22:25,757 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/0cd0e511a6734eedb2e2544428b3885d to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/0cd0e511a6734eedb2e2544428b3885d 2024-11-22T19:22:25,759 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/21874b59375449358dfa5412ea80c4fe to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/21874b59375449358dfa5412ea80c4fe 2024-11-22T19:22:25,761 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/702fe8e554374f86be8cc967be75b21d to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/702fe8e554374f86be8cc967be75b21d 2024-11-22T19:22:25,763 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/9b512660de5349979605e08f486a4a11 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/9b512660de5349979605e08f486a4a11 2024-11-22T19:22:25,764 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/cef5713c7e1e4188892aa0bf74980bd4 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/cef5713c7e1e4188892aa0bf74980bd4 2024-11-22T19:22:25,765 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/079998da92c343678c98eb98523bbb97 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/079998da92c343678c98eb98523bbb97 2024-11-22T19:22:25,768 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/3a91cad7ffed42e4a3e5387fa5a03847 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/3a91cad7ffed42e4a3e5387fa5a03847 2024-11-22T19:22:25,770 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/56a8874616854397888b87d56932b65e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/56a8874616854397888b87d56932b65e 2024-11-22T19:22:25,771 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/2700918f3a304b3fafb7c587f90ddf1e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/2700918f3a304b3fafb7c587f90ddf1e 2024-11-22T19:22:25,774 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/f413c9679db54e5abbee05a9919bac9e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/f413c9679db54e5abbee05a9919bac9e 2024-11-22T19:22:25,777 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d3359da25a77408192f6d9de037532e7 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/d3359da25a77408192f6d9de037532e7 2024-11-22T19:22:25,779 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/b0ad8f80569248dcb7e43e45f6d8bd2d to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/b0ad8f80569248dcb7e43e45f6d8bd2d 2024-11-22T19:22:25,783 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/ce2be98c33e946e6b1fa17fbf3756864, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/74d30a8dd3924e85b7beb5378b20a163, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/25e7047a890f4cf5a8455cf1a660633e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/32ee401a855e47c2ab9bb79bca56d04c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/2ccdee5e3b35401a947028c6449a2cc8, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/aa63b5cbc50342ec95c65320139703f1, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/0c3ccd21adbf4ec88cf33b8bf8312b33, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/34985edc155c416c95c4d95fe4b7f599, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/813e114eada04a65999f6dfedde05c44, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/0acb4e7893774a508eb13dedba3d38e2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/344ce40fe0bf48f3bb6b9b56d29e0f27, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/af215b74a31640848f618417ed8a4fad, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/b9c03510318443ecb7d8d4637f3368f3, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/864d0380133f4f4d935aeda7dc7702c5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/2a2a2fd8861945739033bdbbf4e0ff21, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/145586c3e9674daaba1b4a9d99527d1b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/e63c69e1ce914c4aa76d337eaf864ece, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/b3fe6a255d0d406bbabbca4d351b4190, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/877dc1cef811436386ce43fdae8af642, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/e9f49ff83d6c43ca979084d3c9d2df40, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/7ad6f2652b0d445b9a39a302490cb199, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/6283aef5c7f54cf0ae1d721052bb6595, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/b6c4ea8112fd4fedb6dfd74f3bf45567, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/51bc1592ab494107b924713cbfcca58a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/13889243d1e746cfb9d3ef86c5b2c691, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/4c6c42cff3cf4cba92f837dcb39eaa6b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/4a018656809e4140b11d561505aee5b6, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/d387318ce3304ffb9f7897c28f7c2c0b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/5c635280bc1445e9876b5ba601d01e58, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/e4338388529441d384f70064e8af658a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/3b104b457f4443f7a98a980ec0d343da, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/e21c6e1f7fec42fbb764f269e02f4db9, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/259b99e68e364d5195388bc8453856fa, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/bbf6ea1f0c70494596b34316a26f662a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/b68c16528b3749bf99b5d213902f27dc, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/d17a79a8ba9c45aca44c37e9d083c38e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/7a9a4a153ae34f319a8f24f09c120fff, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/84a85a59bae645f2b8d06f16ef0af3bd, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/99a99506937a46f6acf6fb22f7922455, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/9ccb34fb81a7407cb0a6760e30f0eb85, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/2006de7059734db9b72f568d72f44e17, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/7ee2443c1a7c48c4bb3edf376e4c2fdc] to archive 2024-11-22T19:22:25,785 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
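Note on the "Archived from FileableStoreFile, ... to ..." entries above and below: on store close the HFileArchiver moves (renames, not copies) each compacted store file from the region's data directory to the parallel path under archive/, keeping the table/region/family layout intact, exactly as the source and destination paths in the log show. A minimal sketch of that move, assuming a plain Hadoop FileSystem handle and an illustrative helper name (archiveStoreFile is not HBase's actual HFileArchiver API):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveSketch {
      // Move one store file from <root>/data/<relPath> to <root>/archive/data/<relPath>,
      // mirroring the rename the log records for each compacted file.
      static void archiveStoreFile(FileSystem fs, Path rootDir, String relPath) throws IOException {
        Path src = new Path(rootDir, "data/" + relPath);
        Path dst = new Path(rootDir, "archive/data/" + relPath);
        fs.mkdirs(dst.getParent());               // ensure archive/.../<family> exists
        if (!fs.rename(src, dst)) {               // rename (move), not copy
          throw new IOException("Failed to archive " + src + " to " + dst);
        }
      }

      public static void main(String[] args) throws IOException {
        // Assumes the test run's mini-HDFS at localhost:44823 is still reachable; otherwise
        // substitute any filesystem root you control.
        Path root = new Path("hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982");
        FileSystem fs = root.getFileSystem(new Configuration());
        // One of the family C files named in the log for region 686ebaaf5a8e3b2d28eef9abb3c2302e:
        archiveStoreFile(fs, root,
            "default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/ce2be98c33e946e6b1fa17fbf3756864");
      }
    }
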
2024-11-22T19:22:25,788 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/ce2be98c33e946e6b1fa17fbf3756864 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/ce2be98c33e946e6b1fa17fbf3756864 2024-11-22T19:22:25,790 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/74d30a8dd3924e85b7beb5378b20a163 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/74d30a8dd3924e85b7beb5378b20a163 2024-11-22T19:22:25,792 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/25e7047a890f4cf5a8455cf1a660633e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/25e7047a890f4cf5a8455cf1a660633e 2024-11-22T19:22:25,801 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/32ee401a855e47c2ab9bb79bca56d04c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/32ee401a855e47c2ab9bb79bca56d04c 2024-11-22T19:22:25,803 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/2ccdee5e3b35401a947028c6449a2cc8 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/2ccdee5e3b35401a947028c6449a2cc8 2024-11-22T19:22:25,804 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/aa63b5cbc50342ec95c65320139703f1 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/aa63b5cbc50342ec95c65320139703f1 2024-11-22T19:22:25,805 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/0c3ccd21adbf4ec88cf33b8bf8312b33 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/0c3ccd21adbf4ec88cf33b8bf8312b33 2024-11-22T19:22:25,807 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/34985edc155c416c95c4d95fe4b7f599 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/34985edc155c416c95c4d95fe4b7f599 2024-11-22T19:22:25,810 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/813e114eada04a65999f6dfedde05c44 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/813e114eada04a65999f6dfedde05c44 2024-11-22T19:22:25,811 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/0acb4e7893774a508eb13dedba3d38e2 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/0acb4e7893774a508eb13dedba3d38e2 2024-11-22T19:22:25,813 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/344ce40fe0bf48f3bb6b9b56d29e0f27 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/344ce40fe0bf48f3bb6b9b56d29e0f27 2024-11-22T19:22:25,814 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/af215b74a31640848f618417ed8a4fad to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/af215b74a31640848f618417ed8a4fad 2024-11-22T19:22:25,815 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/b9c03510318443ecb7d8d4637f3368f3 to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/b9c03510318443ecb7d8d4637f3368f3 2024-11-22T19:22:25,819 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/864d0380133f4f4d935aeda7dc7702c5 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/864d0380133f4f4d935aeda7dc7702c5 2024-11-22T19:22:25,820 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/2a2a2fd8861945739033bdbbf4e0ff21 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/2a2a2fd8861945739033bdbbf4e0ff21 2024-11-22T19:22:25,821 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/145586c3e9674daaba1b4a9d99527d1b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/145586c3e9674daaba1b4a9d99527d1b 2024-11-22T19:22:25,822 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/e63c69e1ce914c4aa76d337eaf864ece to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/e63c69e1ce914c4aa76d337eaf864ece 2024-11-22T19:22:25,825 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/b3fe6a255d0d406bbabbca4d351b4190 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/b3fe6a255d0d406bbabbca4d351b4190 2024-11-22T19:22:25,827 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/877dc1cef811436386ce43fdae8af642 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/877dc1cef811436386ce43fdae8af642 2024-11-22T19:22:25,828 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/e9f49ff83d6c43ca979084d3c9d2df40 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/e9f49ff83d6c43ca979084d3c9d2df40 2024-11-22T19:22:25,832 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/7ad6f2652b0d445b9a39a302490cb199 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/7ad6f2652b0d445b9a39a302490cb199 2024-11-22T19:22:25,835 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/6283aef5c7f54cf0ae1d721052bb6595 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/6283aef5c7f54cf0ae1d721052bb6595 2024-11-22T19:22:25,837 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/b6c4ea8112fd4fedb6dfd74f3bf45567 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/b6c4ea8112fd4fedb6dfd74f3bf45567 2024-11-22T19:22:25,838 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/51bc1592ab494107b924713cbfcca58a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/51bc1592ab494107b924713cbfcca58a 2024-11-22T19:22:25,842 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/13889243d1e746cfb9d3ef86c5b2c691 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/13889243d1e746cfb9d3ef86c5b2c691 2024-11-22T19:22:25,844 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/4c6c42cff3cf4cba92f837dcb39eaa6b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/4c6c42cff3cf4cba92f837dcb39eaa6b 2024-11-22T19:22:25,845 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/4a018656809e4140b11d561505aee5b6 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/4a018656809e4140b11d561505aee5b6 2024-11-22T19:22:25,847 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/d387318ce3304ffb9f7897c28f7c2c0b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/d387318ce3304ffb9f7897c28f7c2c0b 2024-11-22T19:22:25,849 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/5c635280bc1445e9876b5ba601d01e58 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/5c635280bc1445e9876b5ba601d01e58 2024-11-22T19:22:25,850 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/e4338388529441d384f70064e8af658a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/e4338388529441d384f70064e8af658a 2024-11-22T19:22:25,851 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/3b104b457f4443f7a98a980ec0d343da to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/3b104b457f4443f7a98a980ec0d343da 2024-11-22T19:22:25,852 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/e21c6e1f7fec42fbb764f269e02f4db9 to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/e21c6e1f7fec42fbb764f269e02f4db9 2024-11-22T19:22:25,853 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/259b99e68e364d5195388bc8453856fa to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/259b99e68e364d5195388bc8453856fa 2024-11-22T19:22:25,855 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/bbf6ea1f0c70494596b34316a26f662a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/bbf6ea1f0c70494596b34316a26f662a 2024-11-22T19:22:25,856 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/b68c16528b3749bf99b5d213902f27dc to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/b68c16528b3749bf99b5d213902f27dc 2024-11-22T19:22:25,858 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/d17a79a8ba9c45aca44c37e9d083c38e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/d17a79a8ba9c45aca44c37e9d083c38e 2024-11-22T19:22:25,859 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/7a9a4a153ae34f319a8f24f09c120fff to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/7a9a4a153ae34f319a8f24f09c120fff 2024-11-22T19:22:25,860 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/84a85a59bae645f2b8d06f16ef0af3bd to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/84a85a59bae645f2b8d06f16ef0af3bd 2024-11-22T19:22:25,862 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/99a99506937a46f6acf6fb22f7922455 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/99a99506937a46f6acf6fb22f7922455 2024-11-22T19:22:25,864 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/9ccb34fb81a7407cb0a6760e30f0eb85 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/9ccb34fb81a7407cb0a6760e30f0eb85 2024-11-22T19:22:25,865 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/2006de7059734db9b72f568d72f44e17 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/2006de7059734db9b72f568d72f44e17 2024-11-22T19:22:25,866 DEBUG [StoreCloser-TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/7ee2443c1a7c48c4bb3edf376e4c2fdc to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/7ee2443c1a7c48c4bb3edf376e4c2fdc 2024-11-22T19:22:25,871 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/recovered.edits/606.seqid, newMaxSeqId=606, maxSeqId=1 2024-11-22T19:22:25,874 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e. 
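Note on the entries that follow: once the region is closed, the remaining log lines are the master-side procedures triggered by the client's disable and delete requests (DisableTableProcedure pid=32 with its CloseTableRegionsProcedure/CloseRegionProcedure children, then DeleteTableProcedure pid=36, which archives the region directory and removes the rows from hbase:meta). A hedged sketch of the client calls that drive this sequence, using the standard HBase Admin API rather than anything specific to this test class:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml / the test cluster config
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          if (admin.isTableEnabled(tn)) {
            admin.disableTable(tn);   // master runs DisableTableProcedure: unassign and close the regions
          }
          admin.deleteTable(tn);      // master runs DeleteTableProcedure: archive region dirs, clean hbase:meta
        }
      }
    }
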
2024-11-22T19:22:25,874 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1635): Region close journal for 686ebaaf5a8e3b2d28eef9abb3c2302e: 2024-11-22T19:22:25,876 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(170): Closed 686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:25,877 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=686ebaaf5a8e3b2d28eef9abb3c2302e, regionState=CLOSED 2024-11-22T19:22:25,880 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-11-22T19:22:25,880 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; CloseRegionProcedure 686ebaaf5a8e3b2d28eef9abb3c2302e, server=a307a1377457,35917,1732303314657 in 1.7550 sec 2024-11-22T19:22:25,881 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=33 2024-11-22T19:22:25,881 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=33, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=686ebaaf5a8e3b2d28eef9abb3c2302e, UNASSIGN in 1.7600 sec 2024-11-22T19:22:25,883 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-22T19:22:25,883 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.7680 sec 2024-11-22T19:22:25,885 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303345884"}]},"ts":"1732303345884"} 2024-11-22T19:22:25,886 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-22T19:22:25,889 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-22T19:22:25,891 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.7890 sec 2024-11-22T19:22:26,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-22T19:22:26,213 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-11-22T19:22:26,217 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-22T19:22:26,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:22:26,225 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:22:26,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-22T19:22:26,229 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=36, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:22:26,236 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:26,241 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A, FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B, FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C, FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/recovered.edits] 2024-11-22T19:22:26,247 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/068ab461cfcc4f26bfcc731289a52dd0 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/068ab461cfcc4f26bfcc731289a52dd0 2024-11-22T19:22:26,251 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/d99ccdabca4c4e69ab8a9a27a7c2430e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/d99ccdabca4c4e69ab8a9a27a7c2430e 2024-11-22T19:22:26,254 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/e0dc53652eb5431c996716e7f74794b5 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/A/e0dc53652eb5431c996716e7f74794b5 2024-11-22T19:22:26,259 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/2c6a1d5359c94289bff44d6156bd6971 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/2c6a1d5359c94289bff44d6156bd6971 2024-11-22T19:22:26,264 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/75ee36057f44460a9d161701baba9bda to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/75ee36057f44460a9d161701baba9bda 2024-11-22T19:22:26,267 
DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/e6e10d44a1954640883a2523ac77d671 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/B/e6e10d44a1954640883a2523ac77d671 2024-11-22T19:22:26,270 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/21da0b438b994e5da1694902a4942e38 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/21da0b438b994e5da1694902a4942e38 2024-11-22T19:22:26,271 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/a7e630814c6d4ca1acc59da78ca57045 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/a7e630814c6d4ca1acc59da78ca57045 2024-11-22T19:22:26,275 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/bccfd48ad82746a69798cb4d035620ef to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/C/bccfd48ad82746a69798cb4d035620ef 2024-11-22T19:22:26,287 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/recovered.edits/606.seqid to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e/recovered.edits/606.seqid 2024-11-22T19:22:26,288 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/686ebaaf5a8e3b2d28eef9abb3c2302e 2024-11-22T19:22:26,289 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-22T19:22:26,296 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=36, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:22:26,303 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-22T19:22:26,328 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-22T19:22:26,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-22T19:22:26,383 DEBUG [PEWorker-3 {}] 
procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-22T19:22:26,385 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=36, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:22:26,385 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-22T19:22:26,385 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732303346385"}]},"ts":"9223372036854775807"} 2024-11-22T19:22:26,389 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-22T19:22:26,389 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 686ebaaf5a8e3b2d28eef9abb3c2302e, NAME => 'TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e.', STARTKEY => '', ENDKEY => ''}] 2024-11-22T19:22:26,389 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-22T19:22:26,389 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732303346389"}]},"ts":"9223372036854775807"} 2024-11-22T19:22:26,392 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-22T19:22:26,395 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=36, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:22:26,397 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 177 msec 2024-11-22T19:22:26,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-22T19:22:26,538 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 36 completed 2024-11-22T19:22:26,551 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=238 (was 218) Potentially hanging thread: hconnection-0x4db3c113-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/a307a1377457:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4db3c113-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;a307a1377457:35917-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4db3c113-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-647651687_22 at /127.0.0.1:43510 [Waiting for operation #430] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4db3c113-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/a307a1377457:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1527644496_22 at /127.0.0.1:43336 [Waiting for operation #432] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=454 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=632 (was 399) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4353 (was 4871) 2024-11-22T19:22:26,562 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=238, OpenFileDescriptor=454, MaxFileDescriptor=1048576, SystemLoadAverage=632, ProcessCount=11, AvailableMemoryMB=4353 2024-11-22T19:22:26,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
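The "Potentially hanging thread" entries above come from the test's resource checker, which snapshots live threads before and after each test and prints the stack of every thread that is new in the "after" snapshot (Thread=238 here, alongside the OpenFileDescriptor and SystemLoadAverage deltas). A minimal sketch of that idea in plain JDK terms, assuming nothing about the HBase ResourceChecker's actual implementation:

    import java.util.HashSet;
    import java.util.Set;

    // Minimal sketch (not the HBase ResourceChecker itself): snapshot live thread
    // names before and after a test and report the ones that appeared, the same
    // idea behind the "Potentially hanging thread" entries above.
    public final class ThreadSnapshotDiff {
        public static Set<String> snapshot() {
            Set<String> names = new HashSet<>();
            for (Thread t : Thread.getAllStackTraces().keySet()) {
                names.add(t.getName());
            }
            return names;
        }

        public static void reportNew(Set<String> before, Set<String> after) {
            Set<String> leaked = new HashSet<>(after);
            leaked.removeAll(before);
            for (String name : leaked) {
                System.out.println("Potentially hanging thread: " + name);
            }
        }
    }

Threads parked in Netty epoll waits or in idle ThreadPoolExecutor workers, as in the dump above, show up in such a diff even though they are merely idle, which is why the report hedges with "Potentially".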
2024-11-22T19:22:26,565 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T19:22:26,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=37, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-22T19:22:26,569 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T19:22:26,569 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:26,569 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 37 2024-11-22T19:22:26,570 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T19:22:26,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-22T19:22:26,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741974_1150 (size=963) 2024-11-22T19:22:26,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-22T19:22:26,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-22T19:22:26,997 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982 2024-11-22T19:22:27,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741975_1151 (size=53) 2024-11-22T19:22:27,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-22T19:22:27,405 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T19:22:27,405 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 7de534ec4ea5964284edbc5ae1079040, disabling compactions & flushes 2024-11-22T19:22:27,405 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:27,405 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:27,405 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. after waiting 0 ms 2024-11-22T19:22:27,405 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:27,405 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
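The create request at 19:22:26,565 builds 'TestAcidGuarantees' with the ADAPTIVE in-memory compaction attribute and three identical column families A, B and C (VERSIONS => '1', BLOCKSIZE => 65536). A rough client-side equivalent is sketched below; it is an approximation, not the test's actual code, and the deliberately tiny 128 KB memstore flush size is what triggers the TableDescriptorChecker warning at 19:22:26,564.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Rough client-side equivalent of the CreateTableProcedure logged above
    // (an approximation, not the test's actual code).
    public class CreateTestTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableDescriptorBuilder tdb = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    // matches TABLE_ATTRIBUTES => {METADATA => {...}} in the log
                    .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
                    // 131072 bytes: intentionally small for the test, and the reason
                    // for the MEMSTORE_FLUSHSIZE warning; production would use more
                    .setMemStoreFlushSize(128 * 1024L);
                for (String family : new String[] { "A", "B", "C" }) {
                    ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes(family))
                        .setMaxVersions(1)        // VERSIONS => '1'
                        .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536 B (64KB)'
                        .build();
                    tdb.setColumnFamily(cfd);
                }
                admin.createTable(tdb.build());
            }
        }
    }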
2024-11-22T19:22:27,405 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:27,407 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T19:22:27,407 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732303347407"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732303347407"}]},"ts":"1732303347407"} 2024-11-22T19:22:27,409 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-22T19:22:27,410 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T19:22:27,411 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303347410"}]},"ts":"1732303347410"} 2024-11-22T19:22:27,412 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-22T19:22:27,419 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7de534ec4ea5964284edbc5ae1079040, ASSIGN}] 2024-11-22T19:22:27,421 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7de534ec4ea5964284edbc5ae1079040, ASSIGN 2024-11-22T19:22:27,422 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=7de534ec4ea5964284edbc5ae1079040, ASSIGN; state=OFFLINE, location=a307a1377457,35917,1732303314657; forceNewPlan=false, retain=false 2024-11-22T19:22:27,572 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=7de534ec4ea5964284edbc5ae1079040, regionState=OPENING, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:22:27,574 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; OpenRegionProcedure 7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657}] 2024-11-22T19:22:27,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-22T19:22:27,726 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:27,731 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
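The repeated "Checking to see if procedure is done pid=37" lines are the client polling the master until the CreateTableProcedure completes (the HBaseAdmin$TableFuture completion shows up later, at 19:22:28,679), after which the single region (STARTKEY => '', ENDKEY => '') is assigned to a307a1377457,35917. Continuing with the conn and admin handles from the sketch above, a hedged way to wait for the table and confirm the assignment from the client side (additional imports: org.apache.hadoop.hbase.HConstants, org.apache.hadoop.hbase.HRegionLocation, org.apache.hadoop.hbase.client.RegionLocator):

    // Hedged sketch: after the create procedure finishes, confirm the table is
    // available and locate its single region (empty start key == one region).
    TableName tn = TableName.valueOf("TestAcidGuarantees");
    while (!admin.isTableAvailable(tn)) {
        Thread.sleep(200);  // mirrors the "is procedure done" polling seen in the log
    }
    try (RegionLocator locator = conn.getRegionLocator(tn)) {
        HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW, true);
        System.out.println(loc.getRegion().getEncodedName() + " on " + loc.getServerName());
    }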
2024-11-22T19:22:27,731 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7285): Opening region: {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} 2024-11-22T19:22:27,731 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:27,732 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T19:22:27,732 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7327): checking encryption for 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:27,732 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7330): checking classloading for 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:27,734 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:27,736 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:22:27,736 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7de534ec4ea5964284edbc5ae1079040 columnFamilyName A 2024-11-22T19:22:27,736 DEBUG [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:27,737 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] regionserver.HStore(327): Store=7de534ec4ea5964284edbc5ae1079040/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:22:27,737 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:27,739 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:22:27,740 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7de534ec4ea5964284edbc5ae1079040 columnFamilyName B 2024-11-22T19:22:27,741 DEBUG [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:27,741 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] regionserver.HStore(327): Store=7de534ec4ea5964284edbc5ae1079040/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:22:27,741 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:27,742 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:22:27,743 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7de534ec4ea5964284edbc5ae1079040 columnFamilyName C 2024-11-22T19:22:27,743 DEBUG [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:27,743 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] regionserver.HStore(327): Store=7de534ec4ea5964284edbc5ae1079040/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:22:27,743 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:27,744 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:27,745 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:27,747 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T19:22:27,749 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1085): writing seq id for 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:27,752 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T19:22:27,752 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1102): Opened 7de534ec4ea5964284edbc5ae1079040; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73033328, jitterRate=0.08828139305114746}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T19:22:27,754 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1001): Region open journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:27,755 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., pid=39, masterSystemTime=1732303347726 2024-11-22T19:22:27,757 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:27,757 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
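The CompactionConfiguration line printed for each store shows stock defaults: minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, major period 604800000 ms with 0.5 jitter. These appear to map to the standard configuration keys below (reusing Configuration/HBaseConfiguration as imported in the earlier sketch); the key-to-value mapping is stated as an assumption for this HBase version and worth verifying before tuning.

    // Hedged sketch: configuration keys that appear to correspond to the
    // CompactionConfiguration values printed above (defaults shown).
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);               // minFilesToCompact:3
    conf.setInt("hbase.hstore.compaction.max", 10);              // maxFilesToCompact:10
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);        // ratio 1.200000
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);   // major period (7 days)
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f); // major jitter 0.500000
    // the ADAPTIVE compactor shown for each CompactingMemStore comes from the
    // table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'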
2024-11-22T19:22:27,758 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=7de534ec4ea5964284edbc5ae1079040, regionState=OPEN, openSeqNum=2, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:22:27,761 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-11-22T19:22:27,761 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; OpenRegionProcedure 7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 in 185 msec 2024-11-22T19:22:27,763 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=37 2024-11-22T19:22:27,764 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=37, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7de534ec4ea5964284edbc5ae1079040, ASSIGN in 342 msec 2024-11-22T19:22:27,764 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T19:22:27,765 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303347764"}]},"ts":"1732303347764"} 2024-11-22T19:22:27,766 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-22T19:22:27,769 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T19:22:27,770 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2040 sec 2024-11-22T19:22:28,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-22T19:22:28,679 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 37 completed 2024-11-22T19:22:28,681 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0ff872d8 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4506927 2024-11-22T19:22:28,684 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a9b9802, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:28,686 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:28,688 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42426, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:28,690 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T19:22:28,692 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37686, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T19:22:28,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-22T19:22:28,697 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T19:22:28,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=40, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-22T19:22:28,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741976_1152 (size=999) 2024-11-22T19:22:29,116 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-22T19:22:29,116 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-22T19:22:29,120 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-22T19:22:29,129 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7de534ec4ea5964284edbc5ae1079040, REOPEN/MOVE}] 2024-11-22T19:22:29,130 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7de534ec4ea5964284edbc5ae1079040, REOPEN/MOVE 2024-11-22T19:22:29,130 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=7de534ec4ea5964284edbc5ae1079040, regionState=CLOSING, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:22:29,131 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T19:22:29,131 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE; CloseRegionProcedure 7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657}] 2024-11-22T19:22:29,283 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:29,284 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(124): Close 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:29,284 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-22T19:22:29,284 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1681): Closing 7de534ec4ea5964284edbc5ae1079040, disabling compactions & flushes 2024-11-22T19:22:29,284 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:29,284 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:29,284 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. after waiting 0 ms 2024-11-22T19:22:29,284 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
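The modify request at 19:22:28,697 changes only family A, adding IS_MOB => 'true' and MOB_THRESHOLD => '4' so that values larger than 4 bytes are written as MOB files; the master then reopens the region to apply the new descriptor. A hedged client-side equivalent, reusing the admin and tn handles from the earlier sketches (additional import: org.apache.hadoop.hbase.client.TableDescriptor):

    // Hedged sketch of the ModifyTableProcedure request above: enable MOB on
    // family A with a 4-byte threshold, leaving B and C untouched.
    TableDescriptor current = admin.getDescriptor(tn);
    ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder
        .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
        .setMobEnabled(true)   // IS_MOB => 'true'
        .setMobThreshold(4)    // MOB_THRESHOLD => '4'
        .build();
    admin.modifyTable(TableDescriptorBuilder.newBuilder(current)
        .modifyColumnFamily(mobA)
        .build());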
2024-11-22T19:22:29,288 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-22T19:22:29,289 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:29,289 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1635): Region close journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:29,289 WARN [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegionServer(3786): Not adding moved region record: 7de534ec4ea5964284edbc5ae1079040 to self. 2024-11-22T19:22:29,291 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(170): Closed 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:29,291 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=7de534ec4ea5964284edbc5ae1079040, regionState=CLOSED 2024-11-22T19:22:29,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-11-22T19:22:29,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; CloseRegionProcedure 7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 in 161 msec 2024-11-22T19:22:29,295 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=7de534ec4ea5964284edbc5ae1079040, REOPEN/MOVE; state=CLOSED, location=a307a1377457,35917,1732303314657; forceNewPlan=false, retain=true 2024-11-22T19:22:29,445 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=7de534ec4ea5964284edbc5ae1079040, regionState=OPENING, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:22:29,447 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=42, state=RUNNABLE; OpenRegionProcedure 7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657}] 2024-11-22T19:22:29,599 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:29,602 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
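Because the reopen runs with retain=true, the region lands back on the same server (a307a1377457,35917) carrying the new descriptor. A short hedged check, again from the client side, that the reopened table actually picked up the MOB settings:

    // Hedged sketch: verify the reopened table carries the MOB settings.
    ColumnFamilyDescriptor a = admin.getDescriptor(tn).getColumnFamily(Bytes.toBytes("A"));
    assert a.isMobEnabled();            // IS_MOB => 'true'
    assert a.getMobThreshold() == 4L;   // MOB_THRESHOLD => '4'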
2024-11-22T19:22:29,603 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7285): Opening region: {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} 2024-11-22T19:22:29,603 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:29,603 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T19:22:29,603 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7327): checking encryption for 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:29,603 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7330): checking classloading for 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:29,607 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:29,608 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:22:29,613 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7de534ec4ea5964284edbc5ae1079040 columnFamilyName A 2024-11-22T19:22:29,615 DEBUG [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:29,615 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] regionserver.HStore(327): Store=7de534ec4ea5964284edbc5ae1079040/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:22:29,616 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:29,617 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:22:29,617 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7de534ec4ea5964284edbc5ae1079040 columnFamilyName B 2024-11-22T19:22:29,617 DEBUG [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:29,617 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] regionserver.HStore(327): Store=7de534ec4ea5964284edbc5ae1079040/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:22:29,617 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:29,618 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:22:29,618 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7de534ec4ea5964284edbc5ae1079040 columnFamilyName C 2024-11-22T19:22:29,618 DEBUG [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:29,618 INFO [StoreOpener-7de534ec4ea5964284edbc5ae1079040-1 {}] regionserver.HStore(327): Store=7de534ec4ea5964284edbc5ae1079040/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:22:29,618 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:29,619 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:29,620 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:29,621 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T19:22:29,622 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1085): writing seq id for 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:29,623 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1102): Opened 7de534ec4ea5964284edbc5ae1079040; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72816739, jitterRate=0.08505396544933319}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T19:22:29,624 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1001): Region open journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:29,625 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., pid=44, masterSystemTime=1732303349599 2024-11-22T19:22:29,627 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=7de534ec4ea5964284edbc5ae1079040, regionState=OPEN, openSeqNum=5, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:22:29,627 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:29,627 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
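The block of ReadOnlyZKClient "Connect 0x... to 127.0.0.1:57120" and AbstractRpcClient lines that follows is the test opening a batch of independent client connections, presumably one per worker it is about to start for the mixed-atomicity run. Each pair corresponds roughly to a plain connection like the sketch below; the quorum settings are assumed from the address shown in the log.

    // Hedged sketch: each "ReadOnlyZKClient ... Connect" / "AbstractRpcClient"
    // pair below corresponds to one independent Connection like this.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "57120");  // port taken from the log
    Connection c = ConnectionFactory.createConnection(conf);
    // ... hand c to a worker thread, and close() it when the worker finishes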
2024-11-22T19:22:29,629 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=42 2024-11-22T19:22:29,629 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=42, state=SUCCESS; OpenRegionProcedure 7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 in 181 msec 2024-11-22T19:22:29,631 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-11-22T19:22:29,631 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7de534ec4ea5964284edbc5ae1079040, REOPEN/MOVE in 500 msec 2024-11-22T19:22:29,633 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-11-22T19:22:29,633 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 513 msec 2024-11-22T19:22:29,636 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 935 msec 2024-11-22T19:22:29,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-11-22T19:22:29,644 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7cae6c5c to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79982672 2024-11-22T19:22:29,652 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@433e2b26, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:29,653 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c820ef9 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7b4bd1ba 2024-11-22T19:22:29,657 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@176c5c1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:29,658 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0b44b1e5 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@454f1431 2024-11-22T19:22:29,661 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@190853fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:29,662 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x42e904d8 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@505d5ccd 2024-11-22T19:22:29,665 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c5c4716, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:29,666 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a4c53ed to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@367f47f7 2024-11-22T19:22:29,668 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2885d2d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:29,669 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x247c0c93 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22e911df 2024-11-22T19:22:29,672 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78cafade, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:29,673 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x517ff977 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b727d6e 2024-11-22T19:22:29,677 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14c16cd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:29,678 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3448d233 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c7940d9 2024-11-22T19:22:29,680 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@341384e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:29,682 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7a11164b to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c38ee58 2024-11-22T19:22:29,684 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26b120d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:29,688 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:22:29,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] 
procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees 2024-11-22T19:22:29,690 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:22:29,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-22T19:22:29,691 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:22:29,691 DEBUG [hconnection-0x266405ab-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:29,691 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:22:29,693 DEBUG [hconnection-0x7abdb9aa-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:29,693 DEBUG [hconnection-0x1b80bd1e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:29,693 DEBUG [hconnection-0x6fe45cb1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:29,694 DEBUG [hconnection-0x307eefc5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:29,695 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42466, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:29,695 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42436, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:29,695 DEBUG [hconnection-0x452cbbe5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:29,695 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42452, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:29,696 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42476, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:29,696 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42480, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:29,696 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42468, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:29,696 DEBUG [hconnection-0x3795ec0e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 
2024-11-22T19:22:29,697 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42494, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:29,697 DEBUG [hconnection-0x167610df-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:29,698 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42510, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:29,700 DEBUG [hconnection-0x42b4d2a0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:29,702 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42518, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:29,712 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T19:22:29,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:29,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:29,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:29,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:29,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:29,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:29,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:29,771 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411229a1c800b63984f8899e2871392b0829f_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303349708/Put/seqid=0 2024-11-22T19:22:29,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:29,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303409775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:29,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:29,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303409778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:29,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:29,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303409782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:29,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741977_1153 (size=14594) 2024-11-22T19:22:29,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:29,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303409784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:29,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:29,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303409787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:29,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-22T19:22:29,792 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:29,798 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411229a1c800b63984f8899e2871392b0829f_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411229a1c800b63984f8899e2871392b0829f_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:29,799 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/c4287c8cadb84e19a9190c30d12c872c, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:29,809 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/c4287c8cadb84e19a9190c30d12c872c is 175, key is test_row_0/A:col10/1732303349708/Put/seqid=0 2024-11-22T19:22:29,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741978_1154 (size=39549) 2024-11-22T19:22:29,823 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/c4287c8cadb84e19a9190c30d12c872c 2024-11-22T19:22:29,843 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:29,844 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-22T19:22:29,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:29,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:29,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:29,844 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:29,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:29,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:29,863 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/246682c29fe84b60aa60e104611b64d5 is 50, key is test_row_0/B:col10/1732303349708/Put/seqid=0 2024-11-22T19:22:29,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741979_1155 (size=12001) 2024-11-22T19:22:29,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:29,894 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:29,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303409890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:29,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303409890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:29,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:29,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303409891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:29,895 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:29,895 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:29,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303409891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:29,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303409891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:29,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-22T19:22:29,997 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:29,998 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-22T19:22:29,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:29,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:29,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
2024-11-22T19:22:29,998 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:29,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:30,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:30,097 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:30,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303410096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:30,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:30,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303410097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:30,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:30,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303410098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:30,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:30,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303410098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:30,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:30,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303410099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:30,151 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:30,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-22T19:22:30,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:30,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:30,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:30,152 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:30,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:30,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:30,276 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/246682c29fe84b60aa60e104611b64d5 2024-11-22T19:22:30,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-22T19:22:30,305 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:30,306 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-22T19:22:30,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:30,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:30,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
2024-11-22T19:22:30,307 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:30,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:30,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:30,308 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/4e68a64920954d9dbf1df3e52623b674 is 50, key is test_row_0/C:col10/1732303349708/Put/seqid=0 2024-11-22T19:22:30,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741980_1156 (size=12001) 2024-11-22T19:22:30,324 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/4e68a64920954d9dbf1df3e52623b674 2024-11-22T19:22:30,332 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/c4287c8cadb84e19a9190c30d12c872c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/c4287c8cadb84e19a9190c30d12c872c 2024-11-22T19:22:30,337 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/c4287c8cadb84e19a9190c30d12c872c, entries=200, sequenceid=16, filesize=38.6 K 2024-11-22T19:22:30,339 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/246682c29fe84b60aa60e104611b64d5 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/246682c29fe84b60aa60e104611b64d5 2024-11-22T19:22:30,350 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/246682c29fe84b60aa60e104611b64d5, entries=150, sequenceid=16, filesize=11.7 K 2024-11-22T19:22:30,351 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/4e68a64920954d9dbf1df3e52623b674 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/4e68a64920954d9dbf1df3e52623b674 2024-11-22T19:22:30,369 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/4e68a64920954d9dbf1df3e52623b674, entries=150, sequenceid=16, filesize=11.7 K 2024-11-22T19:22:30,371 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 7de534ec4ea5964284edbc5ae1079040 in 659ms, sequenceid=16, compaction requested=false 2024-11-22T19:22:30,371 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-22T19:22:30,372 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:30,404 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T19:22:30,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:30,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:30,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:30,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:30,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:30,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:30,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:30,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:30,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303410410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:30,418 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:30,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303410412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:30,419 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:30,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303410413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:30,420 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:30,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303410416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:30,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:30,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303410418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:30,456 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122cdc169c48f6f40e8a15c5d18f0339f72_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303349779/Put/seqid=0 2024-11-22T19:22:30,459 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:30,460 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-22T19:22:30,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:30,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:30,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:30,460 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:30,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:30,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:30,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741981_1157 (size=14594) 2024-11-22T19:22:30,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:30,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303410520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:30,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:30,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303410520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:30,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:30,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303410521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:30,525 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:30,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303410525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:30,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:30,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303410525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:30,614 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:30,614 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-22T19:22:30,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:30,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:30,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:30,615 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:30,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:30,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:30,683 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T19:22:30,685 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37692, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T19:22:30,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:30,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303410723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:30,724 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:30,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303410724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:30,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:30,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303410725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:30,728 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:30,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303410728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:30,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:30,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303410731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:30,767 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:30,768 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-22T19:22:30,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:30,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:30,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:30,768 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:30,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:30,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:30,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-22T19:22:30,878 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:30,885 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122cdc169c48f6f40e8a15c5d18f0339f72_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122cdc169c48f6f40e8a15c5d18f0339f72_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:30,886 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/56a8e5cf3ece45b9a2dfb37945618dfb, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:30,887 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/56a8e5cf3ece45b9a2dfb37945618dfb is 175, key is test_row_0/A:col10/1732303349779/Put/seqid=0 2024-11-22T19:22:30,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741982_1158 (size=39549) 2024-11-22T19:22:30,917 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/56a8e5cf3ece45b9a2dfb37945618dfb 2024-11-22T19:22:30,921 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:30,922 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-22T19:22:30,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:30,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
as already flushing 2024-11-22T19:22:30,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:30,922 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:30,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:30,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:30,929 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/ad4dc90a25d84c1d8b310f5967a9b294 is 50, key is test_row_0/B:col10/1732303349779/Put/seqid=0 2024-11-22T19:22:30,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741983_1159 (size=12001) 2024-11-22T19:22:30,966 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/ad4dc90a25d84c1d8b310f5967a9b294 2024-11-22T19:22:30,992 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/0e4c26ae438442bdb73d5988945d3f68 is 50, key is test_row_0/C:col10/1732303349779/Put/seqid=0 2024-11-22T19:22:31,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741984_1160 (size=12001) 2024-11-22T19:22:31,019 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/0e4c26ae438442bdb73d5988945d3f68 2024-11-22T19:22:31,028 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:31,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303411028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:31,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:31,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303411028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:31,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/56a8e5cf3ece45b9a2dfb37945618dfb as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/56a8e5cf3ece45b9a2dfb37945618dfb 2024-11-22T19:22:31,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:31,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303411030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:31,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:31,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303411033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:31,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:31,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303411034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:31,039 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/56a8e5cf3ece45b9a2dfb37945618dfb, entries=200, sequenceid=41, filesize=38.6 K 2024-11-22T19:22:31,041 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/ad4dc90a25d84c1d8b310f5967a9b294 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/ad4dc90a25d84c1d8b310f5967a9b294 2024-11-22T19:22:31,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,049 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/ad4dc90a25d84c1d8b310f5967a9b294, entries=150, sequenceid=41, filesize=11.7 K 2024-11-22T19:22:31,050 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/0e4c26ae438442bdb73d5988945d3f68 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0e4c26ae438442bdb73d5988945d3f68 2024-11-22T19:22:31,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,058 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0e4c26ae438442bdb73d5988945d3f68, entries=150, sequenceid=41, filesize=11.7 K 2024-11-22T19:22:31,059 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 7de534ec4ea5964284edbc5ae1079040 in 655ms, sequenceid=41, compaction requested=false 2024-11-22T19:22:31,059 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:31,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,074 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:31,075 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-22T19:22:31,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
2024-11-22T19:22:31,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,075 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T19:22:31,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:31,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:31,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:31,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:31,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:31,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:31,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T19:22:31,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122d93ceb2de759420e827d9479442d3fd7_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303350413/Put/seqid=0 2024-11-22T19:22:31,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,089 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,097 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,104 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-22T19:22:31,105 - 2024-11-22T19:22:31,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0/1/2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (this DEBUG record repeats roughly 300 times over this interval, varying only in timestamp and handler thread 0, 1 or 2)
2024-11-22T19:22:31,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741985_1161 (size=9714)
2024-11-22T19:22:31,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[The identical DEBUG entry "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" repeats continuously from 2024-11-22T19:22:31,352 through 2024-11-22T19:22:31,413 on RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=35917); only the timestamps and handler ids vary.]
2024-11-22T19:22:31,413 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
[The same DEBUG entry continues to repeat on handlers 0, 1 and 2 from 2024-11-22T19:22:31,413 through 2024-11-22T19:22:31,498.]
2024-11-22T19:22:31,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,518 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122d93ceb2de759420e827d9479442d3fd7_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122d93ceb2de759420e827d9479442d3fd7_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:31,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/d2dbcc17a124405c88d8e34d37e6c287, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:31,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/d2dbcc17a124405c88d8e34d37e6c287 is 175, key is test_row_0/A:col10/1732303350413/Put/seqid=0 2024-11-22T19:22:31,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T19:22:31,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741986_1162 (size=22361) 2024-11-22T19:22:31,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,529 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=52, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/d2dbcc17a124405c88d8e34d37e6c287 2024-11-22T19:22:31,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:31,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:31,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,544 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/2556c261bfea46babb30d0ece8d29881 is 50, key is test_row_0/B:col10/1732303350413/Put/seqid=0 2024-11-22T19:22:31,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741987_1163 (size=9657) 2024-11-22T19:22:31,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,554 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/2556c261bfea46babb30d0ece8d29881 2024-11-22T19:22:31,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,557 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/46825ad2dec845048f6478348a843c30 is 50, key is test_row_0/C:col10/1732303350413/Put/seqid=0 2024-11-22T19:22:31,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:31,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303411571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:31,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:31,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303411574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:31,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:31,580 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:31,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303411576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:31,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303411575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:31,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:31,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303411577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:31,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741988_1164 (size=9657) 2024-11-22T19:22:31,592 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/46825ad2dec845048f6478348a843c30 2024-11-22T19:22:31,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/d2dbcc17a124405c88d8e34d37e6c287 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/d2dbcc17a124405c88d8e34d37e6c287 2024-11-22T19:22:31,606 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/d2dbcc17a124405c88d8e34d37e6c287, entries=100, sequenceid=52, filesize=21.8 K 2024-11-22T19:22:31,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/2556c261bfea46babb30d0ece8d29881 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/2556c261bfea46babb30d0ece8d29881 2024-11-22T19:22:31,616 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/2556c261bfea46babb30d0ece8d29881, entries=100, sequenceid=52, filesize=9.4 K 2024-11-22T19:22:31,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/46825ad2dec845048f6478348a843c30 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/46825ad2dec845048f6478348a843c30 2024-11-22T19:22:31,629 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/46825ad2dec845048f6478348a843c30, entries=100, sequenceid=52, filesize=9.4 K 2024-11-22T19:22:31,631 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 7de534ec4ea5964284edbc5ae1079040 in 556ms, sequenceid=52, compaction requested=true 2024-11-22T19:22:31,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:31,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:31,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-11-22T19:22:31,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-11-22T19:22:31,635 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-11-22T19:22:31,635 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9430 sec 2024-11-22T19:22:31,637 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 1.9480 sec 2024-11-22T19:22:31,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:31,679 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-22T19:22:31,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:31,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:31,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:31,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:31,680 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:31,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:31,691 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:31,691 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:31,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303411688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:31,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303411688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:31,691 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:31,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303411689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:31,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:31,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303411691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:31,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:31,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303411691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:31,699 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122488ebecd78824cf9905b380449629eba_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303351569/Put/seqid=0 2024-11-22T19:22:31,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741989_1165 (size=12154) 2024-11-22T19:22:31,708 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:31,714 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122488ebecd78824cf9905b380449629eba_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122488ebecd78824cf9905b380449629eba_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:31,716 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/d655bb7d6af04cf8b1d088d08592c12f, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:31,717 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/d655bb7d6af04cf8b1d088d08592c12f is 175, key is test_row_0/A:col10/1732303351569/Put/seqid=0 2024-11-22T19:22:31,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741990_1166 (size=30955) 2024-11-22T19:22:31,729 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=53.7 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/d655bb7d6af04cf8b1d088d08592c12f 2024-11-22T19:22:31,742 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/2b44e321cd534844ac717483eaf16513 is 50, key is test_row_0/B:col10/1732303351569/Put/seqid=0 2024-11-22T19:22:31,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741991_1167 (size=12001) 2024-11-22T19:22:31,758 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/2b44e321cd534844ac717483eaf16513 2024-11-22T19:22:31,771 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/9c69aac7e2404c4badfad54968d8901d is 50, key is test_row_0/C:col10/1732303351569/Put/seqid=0 2024-11-22T19:22:31,794 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:31,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303411794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:31,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-22T19:22:31,797 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:31,797 INFO [Thread-740 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-11-22T19:22:31,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303411794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:31,799 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:22:31,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:31,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303411795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:31,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:31,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303411797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:31,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741992_1168 (size=12001) 2024-11-22T19:22:31,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:31,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303411798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:31,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-11-22T19:22:31,803 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:22:31,804 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:22:31,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-22T19:22:31,804 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:22:31,804 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/9c69aac7e2404c4badfad54968d8901d 2024-11-22T19:22:31,811 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/d655bb7d6af04cf8b1d088d08592c12f as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/d655bb7d6af04cf8b1d088d08592c12f 2024-11-22T19:22:31,817 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/d655bb7d6af04cf8b1d088d08592c12f, entries=150, sequenceid=79, filesize=30.2 K 2024-11-22T19:22:31,819 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/2b44e321cd534844ac717483eaf16513 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/2b44e321cd534844ac717483eaf16513 2024-11-22T19:22:31,825 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/2b44e321cd534844ac717483eaf16513, entries=150, sequenceid=79, filesize=11.7 K 2024-11-22T19:22:31,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/9c69aac7e2404c4badfad54968d8901d as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/9c69aac7e2404c4badfad54968d8901d 2024-11-22T19:22:31,835 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/9c69aac7e2404c4badfad54968d8901d, entries=150, sequenceid=79, filesize=11.7 K 2024-11-22T19:22:31,836 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 7de534ec4ea5964284edbc5ae1079040 in 156ms, sequenceid=79, compaction requested=true 2024-11-22T19:22:31,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:31,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:31,836 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:22:31,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:31,836 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:22:31,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:31,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:31,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:22:31,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:31,839 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 132414 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:22:31,839 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/A is initiating minor compaction (all files) 2024-11-22T19:22:31,839 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/A in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:31,839 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/c4287c8cadb84e19a9190c30d12c872c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/56a8e5cf3ece45b9a2dfb37945618dfb, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/d2dbcc17a124405c88d8e34d37e6c287, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/d655bb7d6af04cf8b1d088d08592c12f] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=129.3 K 2024-11-22T19:22:31,839 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:31,840 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
files: [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/c4287c8cadb84e19a9190c30d12c872c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/56a8e5cf3ece45b9a2dfb37945618dfb, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/d2dbcc17a124405c88d8e34d37e6c287, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/d655bb7d6af04cf8b1d088d08592c12f] 2024-11-22T19:22:31,840 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45660 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:22:31,840 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/B is initiating minor compaction (all files) 2024-11-22T19:22:31,841 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/B in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:31,841 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/246682c29fe84b60aa60e104611b64d5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/ad4dc90a25d84c1d8b310f5967a9b294, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/2556c261bfea46babb30d0ece8d29881, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/2b44e321cd534844ac717483eaf16513] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=44.6 K 2024-11-22T19:22:31,841 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4287c8cadb84e19a9190c30d12c872c, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732303349707 2024-11-22T19:22:31,841 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 246682c29fe84b60aa60e104611b64d5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732303349708 2024-11-22T19:22:31,841 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56a8e5cf3ece45b9a2dfb37945618dfb, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732303349745 2024-11-22T19:22:31,842 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting ad4dc90a25d84c1d8b310f5967a9b294, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732303349745 2024-11-22T19:22:31,842 DEBUG 
[RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d2dbcc17a124405c88d8e34d37e6c287, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732303350413 2024-11-22T19:22:31,842 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2556c261bfea46babb30d0ece8d29881, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732303350413 2024-11-22T19:22:31,842 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d655bb7d6af04cf8b1d088d08592c12f, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732303351569 2024-11-22T19:22:31,843 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b44e321cd534844ac717483eaf16513, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732303351569 2024-11-22T19:22:31,860 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:31,874 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#B#compaction#148 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:31,876 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112234db7337bad94eeab837b74aa906aaf1_7de534ec4ea5964284edbc5ae1079040 store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:31,880 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112234db7337bad94eeab837b74aa906aaf1_7de534ec4ea5964284edbc5ae1079040, store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:31,880 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112234db7337bad94eeab837b74aa906aaf1_7de534ec4ea5964284edbc5ae1079040 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:31,883 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/e764817545544566812a601c1ab4ca02 is 50, key is test_row_0/B:col10/1732303351569/Put/seqid=0 2024-11-22T19:22:31,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-22T19:22:31,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741994_1170 (size=4469) 2024-11-22T19:22:31,919 INFO 
[RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#A#compaction#147 average throughput is 0.42 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:31,922 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/847941e62f1d4794b8929260e018d016 is 175, key is test_row_0/A:col10/1732303351569/Put/seqid=0 2024-11-22T19:22:31,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741993_1169 (size=12139) 2024-11-22T19:22:31,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741995_1171 (size=31093) 2024-11-22T19:22:31,935 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/e764817545544566812a601c1ab4ca02 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/e764817545544566812a601c1ab4ca02 2024-11-22T19:22:31,944 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/B of 7de534ec4ea5964284edbc5ae1079040 into e764817545544566812a601c1ab4ca02(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
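The flush and compaction entries above follow a write-to-temp-then-commit pattern: new store files are first written under the region's .tmp directory and only afterwards "committed" (renamed) into the column-family directory, as in the HRegionFileSystem "Committing ... as ..." lines. The following is a minimal, illustrative Java sketch of that pattern using the Hadoop FileSystem API only; it is not HBase source, and the paths and names in it are hypothetical examples rather than values taken from this log.

    // Illustrative sketch only: write a file under .tmp, then commit it by rename.
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenCommitSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // 1. Write the new store file under the region's .tmp directory first,
        //    so readers never observe a partially written file.
        Path tmpFile = new Path("/data/default/ExampleTable/region1/.tmp/A/newfile");
        try (FSDataOutputStream out = fs.create(tmpFile)) {
          out.writeBytes("example cell data");
        }

        // 2. "Commit" by renaming into the column-family directory; the HDFS rename
        //    is a single metadata operation, so the file appears all at once.
        Path finalFile = new Path("/data/default/ExampleTable/region1/A/newfile");
        if (!fs.rename(tmpFile, finalFile)) {
          throw new IOException("commit failed for " + finalFile);
        }
      }
    }

The same shape appears twice in the log: once for flush outputs (".tmp/A/..." committed into "A/") and once for compaction outputs, where the committed file then replaces the four selected input files.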
2024-11-22T19:22:31,944 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:31,944 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/B, priority=12, startTime=1732303351836; duration=0sec 2024-11-22T19:22:31,944 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:31,944 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:B 2024-11-22T19:22:31,944 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:22:31,947 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45660 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:22:31,947 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/C is initiating minor compaction (all files) 2024-11-22T19:22:31,947 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/C in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:31,947 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/4e68a64920954d9dbf1df3e52623b674, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0e4c26ae438442bdb73d5988945d3f68, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/46825ad2dec845048f6478348a843c30, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/9c69aac7e2404c4badfad54968d8901d] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=44.6 K 2024-11-22T19:22:31,948 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e68a64920954d9dbf1df3e52623b674, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732303349708 2024-11-22T19:22:31,948 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e4c26ae438442bdb73d5988945d3f68, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732303349745 2024-11-22T19:22:31,949 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 46825ad2dec845048f6478348a843c30, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, 
compression=NONE, seqNum=52, earliestPutTs=1732303350413 2024-11-22T19:22:31,951 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c69aac7e2404c4badfad54968d8901d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732303351569 2024-11-22T19:22:31,956 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:31,957 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-22T19:22:31,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:31,958 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-22T19:22:31,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:31,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:31,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:31,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:31,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:31,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:31,973 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#C#compaction#149 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:31,974 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/b35792afbddc43e6b4d387a579b9a740 is 50, key is test_row_0/C:col10/1732303351569/Put/seqid=0 2024-11-22T19:22:31,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122f12f239d9ca74425b897985d85b56603_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303351689/Put/seqid=0 2024-11-22T19:22:32,002 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:32,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:32,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741996_1172 (size=12139) 2024-11-22T19:22:32,013 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/b35792afbddc43e6b4d387a579b9a740 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/b35792afbddc43e6b4d387a579b9a740 2024-11-22T19:22:32,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741997_1173 (size=12154) 2024-11-22T19:22:32,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:32,025 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/C of 7de534ec4ea5964284edbc5ae1079040 into b35792afbddc43e6b4d387a579b9a740(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
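The recurring RegionTooBusyException / "Over memstore limit=512.0 K" warnings in this section are server-side backpressure: while the region's memstore is over its configured limit and a flush is in progress, new Mutate calls are rejected and the writer is expected to back off and retry. The sketch below shows one hedged way an application-level writer might handle that; the HBase client already retries such failures internally, so this is an optional extra layer, and the table name, column values, and retry parameters are hypothetical, not taken from the log.

    // Illustrative sketch only: retry a put with simple exponential backoff while
    // the region is too busy (e.g. RegionTooBusyException surfaced as an IOException).
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

          long backoffMs = 100;            // initial pause between attempts (hypothetical)
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);              // may fail while the region is flushing
              break;                       // success, stop retrying
            } catch (IOException busy) {   // region busy or other transient write failure
              if (attempt == 5) {
                throw busy;                // give up after the final attempt
              }
              Thread.sleep(backoffMs);     // back off before the next attempt
              backoffMs *= 2;
            }
          }
        }
      }
    }

In the log itself this backpressure resolves once the flush at sequenceid=79 completes and the memstore drops back under the limit, after which the rejected handlers stop logging RegionTooBusyException for a while.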
2024-11-22T19:22:32,025 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:32,026 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/C, priority=12, startTime=1732303351837; duration=0sec 2024-11-22T19:22:32,026 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:32,026 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:C 2024-11-22T19:22:32,029 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122f12f239d9ca74425b897985d85b56603_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f12f239d9ca74425b897985d85b56603_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:32,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/ec21766d595141f28e3dd3d947fcf7ba, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:32,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/ec21766d595141f28e3dd3d947fcf7ba is 175, key is test_row_0/A:col10/1732303351689/Put/seqid=0 2024-11-22T19:22:32,036 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:32,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303412031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:32,036 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:32,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303412031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:32,039 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:32,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303412035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:32,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:32,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303412037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:32,040 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:32,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303412038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:32,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741998_1174 (size=30955) 2024-11-22T19:22:32,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-22T19:22:32,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:32,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303412138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:32,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:32,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303412138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:32,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:32,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303412140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:32,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:32,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303412142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:32,143 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:32,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303412143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:32,343 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/847941e62f1d4794b8929260e018d016 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/847941e62f1d4794b8929260e018d016 2024-11-22T19:22:32,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:32,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303412342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:32,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:32,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303412343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:32,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:32,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303412344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:32,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:32,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:32,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303412345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:32,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303412345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:32,352 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/A of 7de534ec4ea5964284edbc5ae1079040 into 847941e62f1d4794b8929260e018d016(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
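[Editor's note, not part of the captured log] The repeated RegionTooBusyException entries around this point come from HRegion.checkResources rejecting writes while the region's memstore is above its blocking threshold; as far as the standard HBase configuration goes, that threshold is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so the "Over memstore limit=512.0 K" figure here presumably reflects a deliberately small flush size in this test's setup (for example 128 K with the default multiplier of 4). The exception is retriable, and the climbing callId values on the same client connections in the surrounding DEBUG lines are consistent with the client retrying until the flush and compaction recorded above and below drain the memstore. The following is a minimal, hypothetical Java sketch of a writer that backs off on this exception: the table, row, family and qualifier names are taken from the log, while the helper name, retry budget and backoff values are illustrative assumptions, and the two memstore properties are region-server-side settings included only to document where the 512 K limit likely comes from (they would normally live in the server's hbase-site.xml, not on a client Configuration).

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstoreBackpressureSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Server-side properties shown only to illustrate the relationship that
    // produces the 512 K blocking limit seen in this log (flush size x block
    // multiplier); setting them on a client Configuration has no effect on a
    // running region server.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // assumed test value
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // HBase default

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row, family and qualifier mirror the keys visible in the log
      // (test_row_0/A:col10); the value is arbitrary.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithBackoff(table, put);
    }
  }

  // Illustrative helper (not part of HBase): retry a put a few times when the
  // region reports it is too busy, backing off between attempts so flushes can
  // drain the memstore.
  static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
    long backoffMs = 100;                               // assumed starting backoff
    final int maxAttempts = 5;                          // assumed retry budget
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        table.put(put);
        return;                                         // write accepted
      } catch (RegionTooBusyException e) {
        if (attempt == maxAttempts) {
          throw e;                                      // give up after the last attempt
        }
        Thread.sleep(backoffMs);                        // wait out the memstore backpressure
        backoffMs *= 2;
      }
    }
  }
}

In practice the stock client's own retry settings (hbase.client.retries.number and hbase.client.pause) usually make an explicit loop like this unnecessary; the sketch only makes the backpressure contract behind these log entries visible.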
2024-11-22T19:22:32,352 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:32,352 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/A, priority=12, startTime=1732303351836; duration=0sec 2024-11-22T19:22:32,352 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:32,352 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:A 2024-11-22T19:22:32,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-22T19:22:32,450 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=89, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/ec21766d595141f28e3dd3d947fcf7ba 2024-11-22T19:22:32,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/cf6a3dc0af6141fc8e08e5061fcb3870 is 50, key is test_row_0/B:col10/1732303351689/Put/seqid=0 2024-11-22T19:22:32,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741999_1175 (size=12001) 2024-11-22T19:22:32,481 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/cf6a3dc0af6141fc8e08e5061fcb3870 2024-11-22T19:22:32,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/cd7c343690ce4263a4936ebe8b26c602 is 50, key is test_row_0/C:col10/1732303351689/Put/seqid=0 2024-11-22T19:22:32,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742000_1176 (size=12001) 2024-11-22T19:22:32,648 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:32,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303412647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:32,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:32,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303412648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:32,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:32,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303412649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:32,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:32,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303412650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:32,653 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:32,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303412651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:32,905 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/cd7c343690ce4263a4936ebe8b26c602 2024-11-22T19:22:32,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-22T19:22:32,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/ec21766d595141f28e3dd3d947fcf7ba as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/ec21766d595141f28e3dd3d947fcf7ba 2024-11-22T19:22:32,923 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/ec21766d595141f28e3dd3d947fcf7ba, entries=150, sequenceid=89, filesize=30.2 K 2024-11-22T19:22:32,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/cf6a3dc0af6141fc8e08e5061fcb3870 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/cf6a3dc0af6141fc8e08e5061fcb3870 2024-11-22T19:22:32,930 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/cf6a3dc0af6141fc8e08e5061fcb3870, entries=150, sequenceid=89, filesize=11.7 K 2024-11-22T19:22:32,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/cd7c343690ce4263a4936ebe8b26c602 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/cd7c343690ce4263a4936ebe8b26c602 2024-11-22T19:22:32,937 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/cd7c343690ce4263a4936ebe8b26c602, entries=150, sequenceid=89, filesize=11.7 K 2024-11-22T19:22:32,941 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for 7de534ec4ea5964284edbc5ae1079040 in 983ms, sequenceid=89, compaction requested=false 2024-11-22T19:22:32,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:32,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:32,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-11-22T19:22:32,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-11-22T19:22:32,945 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-11-22T19:22:32,945 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1390 sec 2024-11-22T19:22:32,946 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 1.1460 sec 2024-11-22T19:22:33,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:33,154 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-22T19:22:33,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:33,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:33,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:33,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:33,154 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:33,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:33,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:33,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303413159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:33,162 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:33,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303413159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:33,163 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:33,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303413161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:33,166 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122da356618f0464f7baa937e736ab80b4e_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303353152/Put/seqid=0 2024-11-22T19:22:33,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:33,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303413163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:33,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:33,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303413163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:33,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742001_1177 (size=12154) 2024-11-22T19:22:33,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:33,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303413264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:33,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:33,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303413264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:33,269 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:33,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303413269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:33,270 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:33,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303413269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:33,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:33,469 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:33,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303413468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:33,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303413468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:33,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:33,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303413471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:33,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:33,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303413471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:33,583 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:33,589 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122da356618f0464f7baa937e736ab80b4e_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122da356618f0464f7baa937e736ab80b4e_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:33,590 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/a7535a5349e04d39bdea411c74d89cad, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:33,591 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/a7535a5349e04d39bdea411c74d89cad is 175, key is test_row_0/A:col10/1732303353152/Put/seqid=0 2024-11-22T19:22:33,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742002_1178 (size=30955) 2024-11-22T19:22:33,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:33,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303413773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:33,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:33,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303413773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:33,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:33,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303413776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:33,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:33,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303413777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:33,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-22T19:22:33,911 INFO [Thread-740 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-11-22T19:22:33,913 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:22:33,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-11-22T19:22:33,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-22T19:22:33,916 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:22:33,916 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:22:33,917 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:22:34,003 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=120, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/a7535a5349e04d39bdea411c74d89cad 2024-11-22T19:22:34,013 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/28c7dd371e0f4c888cdc583b457c56c2 is 50, key is test_row_0/B:col10/1732303353152/Put/seqid=0 2024-11-22T19:22:34,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 
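[Editor's note] The run of RegionTooBusyException records above shows the region server rejecting Mutate calls while the region's memstore is over its 512.0 K blocking limit; each put is answered with the exception until the flush already in progress frees space. The sketch below is a minimal, hypothetical client-side loop for that situation, not code from this test: the table, family, qualifier, and value are made up to mirror the log, and depending on the client's retry settings the exception may surface wrapped in a retries-exhausted error rather than directly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some-value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          // Rejected with RegionTooBusyException while the memstore is over its blocking limit.
          table.put(put);
          break;
        } catch (RegionTooBusyException tooBusy) {
          // The region is blocking writes until its in-flight flush completes;
          // back off and retry instead of hammering the RPC handler threads.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}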
2024-11-22T19:22:34,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742003_1179 (size=12001) 2024-11-22T19:22:34,041 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/28c7dd371e0f4c888cdc583b457c56c2 2024-11-22T19:22:34,054 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/2e4c4cf4cb6447d4986259514c57afe9 is 50, key is test_row_0/C:col10/1732303353152/Put/seqid=0 2024-11-22T19:22:34,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742004_1180 (size=12001) 2024-11-22T19:22:34,062 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/2e4c4cf4cb6447d4986259514c57afe9 2024-11-22T19:22:34,068 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:34,069 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-22T19:22:34,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:34,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:34,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:34,069 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:34,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:34,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:34,070 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/a7535a5349e04d39bdea411c74d89cad as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/a7535a5349e04d39bdea411c74d89cad 2024-11-22T19:22:34,079 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/a7535a5349e04d39bdea411c74d89cad, entries=150, sequenceid=120, filesize=30.2 K 2024-11-22T19:22:34,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/28c7dd371e0f4c888cdc583b457c56c2 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/28c7dd371e0f4c888cdc583b457c56c2 2024-11-22T19:22:34,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,089 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/28c7dd371e0f4c888cdc583b457c56c2, entries=150, sequenceid=120, 
filesize=11.7 K 2024-11-22T19:22:34,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/2e4c4cf4cb6447d4986259514c57afe9 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/2e4c4cf4cb6447d4986259514c57afe9 2024-11-22T19:22:34,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,093 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,097 INFO 
[MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/2e4c4cf4cb6447d4986259514c57afe9, entries=150, sequenceid=120, filesize=11.7 K 2024-11-22T19:22:34,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,098 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for 7de534ec4ea5964284edbc5ae1079040 in 945ms, sequenceid=120, compaction requested=true 2024-11-22T19:22:34,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:34,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:34,099 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:34,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:34,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,099 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:34,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:34,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:34,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:22:34,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: 
system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:34,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,100 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:34,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,100 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/A is initiating minor compaction (all files) 2024-11-22T19:22:34,100 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/A in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:34,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,101 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/847941e62f1d4794b8929260e018d016, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/ec21766d595141f28e3dd3d947fcf7ba, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/a7535a5349e04d39bdea411c74d89cad] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=90.8 K 2024-11-22T19:22:34,101 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
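[Editor's note] For reference, the flush these records trace back to was requested through the client Admin API (the "Operation: FLUSH, Table Name: default:TestAcidGuarantees" and FlushTableProcedure entries above). The sketch below shows how such a request could be issued; it is an illustration against the public Admin interface, not the test's own code, and the compact() call is optional here since the region server queued compactions itself once the flush finished.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Ask the master to flush every region of the table; in this log such a request
      // runs as a FlushTableProcedure with one FlushRegionProcedure per region.
      admin.flush(table);
      // Optionally request a (minor) compaction as well; in the log above the
      // MemStoreFlusher requested the compactions on its own after the flush.
      admin.compact(table);
    }
  }
}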
2024-11-22T19:22:34,101 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. files: [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/847941e62f1d4794b8929260e018d016, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/ec21766d595141f28e3dd3d947fcf7ba, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/a7535a5349e04d39bdea411c74d89cad] 2024-11-22T19:22:34,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,101 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:34,101 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/B is initiating minor compaction (all files) 2024-11-22T19:22:34,101 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/B in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
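[Editor's note] Two tunables explain the numbers that keep recurring in this log: writes were blocked at "Over memstore limit=512.0 K", a limit HBase derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and each store became a compaction candidate once it had 3 eligible files, the usual minimum file count for a minor compaction. The snippet below is a hypothetical test-style configuration that would produce those thresholds; the actual values used by this TestAcidGuarantees run are not visible in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore at 128 KB instead of the production default of 128 MB.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block writes (RegionTooBusyException) once the memstore reaches
    // flush.size * multiplier -- 512 KB with these hypothetical values.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // Consider a store for minor compaction once it has at least 3 files,
    // matching the "3 store files, 0 compacting, 3 eligible" selection above.
    conf.setInt("hbase.hstore.compaction.min", 3);
    // ... this conf would then be handed to whatever mini-cluster / connection the test creates ...
  }
}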
2024-11-22T19:22:34,101 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 847941e62f1d4794b8929260e018d016, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732303351569 2024-11-22T19:22:34,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,102 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/e764817545544566812a601c1ab4ca02, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/cf6a3dc0af6141fc8e08e5061fcb3870, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/28c7dd371e0f4c888cdc583b457c56c2] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=35.3 K 2024-11-22T19:22:34,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,102 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting ec21766d595141f28e3dd3d947fcf7ba, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732303351685 2024-11-22T19:22:34,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,102 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting e764817545544566812a601c1ab4ca02, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732303351569 2024-11-22T19:22:34,103 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting cf6a3dc0af6141fc8e08e5061fcb3870, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732303351685 2024-11-22T19:22:34,103 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting a7535a5349e04d39bdea411c74d89cad, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732303352034 2024-11-22T19:22:34,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,104 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 28c7dd371e0f4c888cdc583b457c56c2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732303352034 2024-11-22T19:22:34,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
2024-11-22T19:22:34,121 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040]
2024-11-22T19:22:34,124 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#B#compaction#157 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second
2024-11-22T19:22:34,125 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/14ce0edb2b824882b35c646cfd4499f5 is 50, key is test_row_0/B:col10/1732303353152/Put/seqid=0
2024-11-22T19:22:34,132 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411228295003800d140ceba1cb322887489bd_7de534ec4ea5964284edbc5ae1079040 store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040]
2024-11-22T19:22:34,135 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411228295003800d140ceba1cb322887489bd_7de534ec4ea5964284edbc5ae1079040, store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040]
2024-11-22T19:22:34,135 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411228295003800d140ceba1cb322887489bd_7de534ec4ea5964284edbc5ae1079040 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040]
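The short-compactions thread above created a MOB writer up front and then aborted it because this major compaction produced no MOB cells. A minimal sketch of that commit-or-abort step, using plain java.nio.file and hypothetical paths rather than the DefaultMobStoreCompactor internals:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Sketch only: commit a temporary output file if it received any cells, otherwise discard it.
final class CommitOrAbort {

    /** @return the final path if committed, or null if the empty temp file was aborted. */
    static Path commitOrAbort(Path tmpFile, Path finalFile, long cellsWritten) throws IOException {
        if (cellsWritten == 0) {
            // Nothing was written: abort by deleting the temp file
            // (mirrors "Aborting writer ... because there are no MOB cells").
            Files.deleteIfExists(tmpFile);
            return null;
        }
        // Otherwise promote the temp file into its final location.
        Files.createDirectories(finalFile.getParent());
        return Files.move(tmpFile, finalFile, StandardCopyOption.ATOMIC_MOVE);
    }
}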
2024-11-22T19:22:34,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742005_1181 (size=12241)
2024-11-22T19:22:34,155 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/14ce0edb2b824882b35c646cfd4499f5 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/14ce0edb2b824882b35c646cfd4499f5
2024-11-22T19:22:34,162 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/B of 7de534ec4ea5964284edbc5ae1079040 into 14ce0edb2b824882b35c646cfd4499f5(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-22T19:22:34,162 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040:
2024-11-22T19:22:34,162 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/B, priority=13, startTime=1732303354099; duration=0sec
2024-11-22T19:22:34,163 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-22T19:22:34,163 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:B
2024-11-22T19:22:34,163 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-22T19:22:34,164 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-22T19:22:34,164 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/C is initiating minor compaction (all files)
2024-11-22T19:22:34,164 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/C in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.
2024-11-22T19:22:34,164 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/b35792afbddc43e6b4d387a579b9a740, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/cd7c343690ce4263a4936ebe8b26c602, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/2e4c4cf4cb6447d4986259514c57afe9] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=35.3 K
2024-11-22T19:22:34,165 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting b35792afbddc43e6b4d387a579b9a740, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732303351569
2024-11-22T19:22:34,165 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting cd7c343690ce4263a4936ebe8b26c602, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732303351685
2024-11-22T19:22:34,166 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e4c4cf4cb6447d4986259514c57afe9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732303352034
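The policy entries above show three eligible C-family files (roughly 11.9 K + 11.7 K + 11.7 K, 36141 bytes in total) being selected after a single permutation. A simplified sketch of the ratio check that exploring-style selection relies on (not the actual ExploringCompactionPolicy code; the 1.2 ratio and byte sizes below are assumed example values):

import java.util.List;

// Simplified sketch of ratio-based compaction selection; not ExploringCompactionPolicy itself.
final class RatioSelector {

    /** Returns true if every file in the candidate run is at most ratio * (sum of the other files). */
    static boolean withinRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > ratio * (total - size)) {
                return false; // one file dominates the run, so this candidate set is rejected
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate sizes for illustration only, in the same ballpark as the log's three C files.
        List<Long> candidate = List.of(12_185L, 11_978L, 11_978L);
        System.out.println("selectable = " + withinRatio(candidate, 1.2));
    }
}

Because the three files are nearly the same size, any reasonable ratio accepts the full set, which matches the "3 files ... 1 permutations" outcome in the log.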
2024-11-22T19:22:34,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742006_1182 (size=4469)
2024-11-22T19:22:34,177 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#A#compaction#156 average throughput is 0.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-22T19:22:34,178 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/b19cd3f9da9c4872a94a5030e9f91e75 is 175, key is test_row_0/A:col10/1732303353152/Put/seqid=0
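The throughput controller entries report per-compaction average throughput, sleep counts, and a shared 50.00 MB/second limit. A rough sketch of byte-rate throttling in that spirit (hypothetical, not HBase's PressureAwareThroughputController): record bytes written and sleep whenever the writer gets ahead of the allowed rate.

// Sketch of a simple byte-rate throttle; not HBase's PressureAwareThroughputController.
final class RateThrottle {
    private final double maxBytesPerSecond;
    private final long windowStartNanos = System.nanoTime();
    private long bytesInWindow = 0;

    RateThrottle(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
    }

    /** Record bytes written and sleep just long enough to stay under the configured rate. */
    synchronized void control(long bytesWritten) throws InterruptedException {
        bytesInWindow += bytesWritten;
        double elapsedSec = (System.nanoTime() - windowStartNanos) / 1e9;
        double minimumSec = bytesInWindow / maxBytesPerSecond; // time these bytes "should" have taken
        long sleepMs = (long) ((minimumSec - elapsedSec) * 1000);
        if (sleepMs > 0) {
            Thread.sleep(sleepMs); // mirrors the "slept N time(s)" accounting in the log
        }
    }

    public static void main(String[] args) throws InterruptedException {
        RateThrottle throttle = new RateThrottle(50 * 1024 * 1024); // 50 MB/second, as in the log
        throttle.control(8 * 1024 * 1024);                          // e.g. after writing an 8 MB chunk
    }
}

With small test compactions like the ones above, the writer never outruns the limit, which is why the log shows "slept 0 time(s)".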
2024-11-22T19:22:34,183 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#C#compaction#158 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-22T19:22:34,184 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/67c08d5d7baa4ca5b88186783c87d2a0 is 50, key is test_row_0/C:col10/1732303353152/Put/seqid=0
2024-11-22T19:22:34,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742007_1183 (size=31195)
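Each addStoredBlock entry records that a DataNode replica was registered for a finalized block. In spirit (hypothetical types, not the HDFS BlockManager), the bookkeeping is a map from block id to the set of nodes holding a replica:

import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Sketch of replica bookkeeping in the spirit of "BLOCK* addStoredBlock"; not the HDFS BlockManager.
final class BlockLocations {
    private final Map<String, Set<String>> locations = new ConcurrentHashMap<>();

    /** Record that the given datanode now stores a replica of the block; returns the replica count. */
    int addStoredBlock(String blockId, String datanode) {
        Set<String> nodes = locations.computeIfAbsent(blockId, id -> ConcurrentHashMap.newKeySet());
        nodes.add(datanode);
        return nodes.size();
    }

    public static void main(String[] args) {
        BlockLocations blocks = new BlockLocations();
        int replicas = blocks.addStoredBlock("blk_1073742007_1183", "127.0.0.1:41091");
        System.out.println("replicas = " + replicas);
    }
}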
2024-11-22T19:22:34,194 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/b19cd3f9da9c4872a94a5030e9f91e75 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/b19cd3f9da9c4872a94a5030e9f91e75
2024-11-22T19:22:34,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 7de534ec4ea5964284edbc5ae1079040
2024-11-22T19:22:34,207 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-11-22T19:22:34,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A
2024-11-22T19:22:34,208 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/A of 7de534ec4ea5964284edbc5ae1079040 into b19cd3f9da9c4872a94a5030e9f91e75(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute.
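The flusher entries show a flush being requested once the region's memstores hold about 53.67 KB across the three column families. A minimal sketch of a size-based flush trigger (the threshold and names are assumptions, not HBase's configuration):

import java.util.concurrent.atomic.AtomicLong;

// Sketch of a size-based flush trigger; not HRegion/MemStoreFlusher.
final class FlushTrigger {
    private final long flushThresholdBytes;
    private final AtomicLong memstoreSize = new AtomicLong();

    FlushTrigger(long flushThresholdBytes) {
        this.flushThresholdBytes = flushThresholdBytes;
    }

    /** Account for a write; returns true when the accumulated size calls for a flush. */
    boolean add(long cellBytes) {
        return memstoreSize.addAndGet(cellBytes) >= flushThresholdBytes;
    }

    public static void main(String[] args) {
        FlushTrigger trigger = new FlushTrigger(48 * 1024); // hypothetical 48 KB threshold
        System.out.println(trigger.add(54_958));            // ~53.67 KB of pending edits -> flush
    }
}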
2024-11-22T19:22:34,208 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:34,208 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/A, priority=13, startTime=1732303354098; duration=0sec 2024-11-22T19:22:34,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:34,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:34,208 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:34,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:34,208 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:A 2024-11-22T19:22:34,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:34,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:34,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742008_1184 (size=12241) 2024-11-22T19:22:34,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-22T19:22:34,221 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/67c08d5d7baa4ca5b88186783c87d2a0 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/67c08d5d7baa4ca5b88186783c87d2a0 2024-11-22T19:22:34,222 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:34,223 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-22T19:22:34,223 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122f37ac0a738ab466989d55e773ee2851d_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303354196/Put/seqid=0 2024-11-22T19:22:34,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
2024-11-22T19:22:34,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:34,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:34,223 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:34,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:34,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:34,231 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/C of 7de534ec4ea5964284edbc5ae1079040 into 67c08d5d7baa4ca5b88186783c87d2a0(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:34,231 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:34,231 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/C, priority=13, startTime=1732303354099; duration=0sec 2024-11-22T19:22:34,231 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:34,231 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:C 2024-11-22T19:22:34,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742009_1185 (size=12254) 2024-11-22T19:22:34,248 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,255 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122f37ac0a738ab466989d55e773ee2851d_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f37ac0a738ab466989d55e773ee2851d_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:34,256 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/9e42beea5cad4f4a9288dd87a9daad10, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:34,257 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/9e42beea5cad4f4a9288dd87a9daad10 is 175, key is test_row_0/A:col10/1732303354196/Put/seqid=0 2024-11-22T19:22:34,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742010_1186 (size=31055) 2024-11-22T19:22:34,264 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=133, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/9e42beea5cad4f4a9288dd87a9daad10 2024-11-22T19:22:34,278 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/8d6a1e42ed3045e281c8582f8386083a is 50, key is test_row_0/B:col10/1732303354196/Put/seqid=0 2024-11-22T19:22:34,284 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:34,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303414279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:34,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:34,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303414279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:34,286 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:34,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303414280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:34,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:34,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303414282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:34,286 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:34,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303414284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:34,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742011_1187 (size=12101) 2024-11-22T19:22:34,376 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:34,376 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-22T19:22:34,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:34,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:34,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:34,377 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:34,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:34,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:34,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:34,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303414388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:34,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:34,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303414389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:34,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-22T19:22:34,529 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:34,530 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-22T19:22:34,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:34,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:34,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:34,530 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:34,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:34,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:34,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:34,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:34,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303414592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:34,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303414592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:34,686 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:34,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-22T19:22:34,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:34,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:34,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:34,688 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:34,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:34,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:34,694 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/8d6a1e42ed3045e281c8582f8386083a 2024-11-22T19:22:34,703 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/0e5d541d4285432fb1b69513f42f9963 is 50, key is test_row_0/C:col10/1732303354196/Put/seqid=0 2024-11-22T19:22:34,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742012_1188 (size=12101) 2024-11-22T19:22:34,711 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/0e5d541d4285432fb1b69513f42f9963 2024-11-22T19:22:34,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/9e42beea5cad4f4a9288dd87a9daad10 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/9e42beea5cad4f4a9288dd87a9daad10 2024-11-22T19:22:34,731 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/9e42beea5cad4f4a9288dd87a9daad10, entries=150, sequenceid=133, filesize=30.3 K 2024-11-22T19:22:34,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/8d6a1e42ed3045e281c8582f8386083a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/8d6a1e42ed3045e281c8582f8386083a 2024-11-22T19:22:34,737 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/8d6a1e42ed3045e281c8582f8386083a, entries=150, sequenceid=133, filesize=11.8 K 
2024-11-22T19:22:34,738 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/0e5d541d4285432fb1b69513f42f9963 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0e5d541d4285432fb1b69513f42f9963 2024-11-22T19:22:34,744 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0e5d541d4285432fb1b69513f42f9963, entries=150, sequenceid=133, filesize=11.8 K 2024-11-22T19:22:34,746 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 7de534ec4ea5964284edbc5ae1079040 in 539ms, sequenceid=133, compaction requested=false 2024-11-22T19:22:34,746 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:34,840 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:34,840 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-22T19:22:34,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
2024-11-22T19:22:34,841 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T19:22:34,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:34,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:34,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:34,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:34,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:34,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:34,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122a5edcc86766d4933850723d3cc33e413_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303354276/Put/seqid=0 2024-11-22T19:22:34,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742013_1189 (size=12304) 2024-11-22T19:22:34,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:34,879 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122a5edcc86766d4933850723d3cc33e413_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a5edcc86766d4933850723d3cc33e413_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:34,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/73481d5f42c949dca590f9ee7e0a2bd1, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:34,881 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/73481d5f42c949dca590f9ee7e0a2bd1 is 175, key is test_row_0/A:col10/1732303354276/Put/seqid=0 2024-11-22T19:22:34,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742014_1190 (size=31105) 2024-11-22T19:22:34,891 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=159, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/73481d5f42c949dca590f9ee7e0a2bd1 2024-11-22T19:22:34,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:34,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:34,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/c5ae991650424ef48e33bb8c6809b335 is 50, key is test_row_0/B:col10/1732303354276/Put/seqid=0 2024-11-22T19:22:34,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:34,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303414907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:34,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:34,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303414910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:34,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742015_1191 (size=12151) 2024-11-22T19:22:34,919 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/c5ae991650424ef48e33bb8c6809b335 2024-11-22T19:22:34,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/ffa3f9843fb14df49d8f45866ca9debe is 50, key is test_row_0/C:col10/1732303354276/Put/seqid=0 2024-11-22T19:22:34,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742016_1192 (size=12151) 2024-11-22T19:22:34,953 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/ffa3f9843fb14df49d8f45866ca9debe 2024-11-22T19:22:34,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/73481d5f42c949dca590f9ee7e0a2bd1 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/73481d5f42c949dca590f9ee7e0a2bd1 2024-11-22T19:22:34,968 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/73481d5f42c949dca590f9ee7e0a2bd1, entries=150, sequenceid=159, filesize=30.4 K 2024-11-22T19:22:34,969 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/c5ae991650424ef48e33bb8c6809b335 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/c5ae991650424ef48e33bb8c6809b335 2024-11-22T19:22:34,975 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/c5ae991650424ef48e33bb8c6809b335, entries=150, sequenceid=159, filesize=11.9 K 2024-11-22T19:22:34,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/ffa3f9843fb14df49d8f45866ca9debe as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/ffa3f9843fb14df49d8f45866ca9debe 2024-11-22T19:22:34,981 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/ffa3f9843fb14df49d8f45866ca9debe, entries=150, sequenceid=159, filesize=11.9 K 2024-11-22T19:22:34,982 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 7de534ec4ea5964284edbc5ae1079040 in 141ms, sequenceid=159, compaction requested=true 2024-11-22T19:22:34,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:34,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
2024-11-22T19:22:34,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-11-22T19:22:34,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-11-22T19:22:34,985 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-11-22T19:22:34,985 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0660 sec 2024-11-22T19:22:34,987 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 1.0730 sec 2024-11-22T19:22:35,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:35,013 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-22T19:22:35,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:35,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:35,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:35,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:35,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:35,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:35,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-22T19:22:35,022 INFO [Thread-740 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-11-22T19:22:35,022 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122bc0853040956466b9ceedec19fe68200_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303354908/Put/seqid=0 2024-11-22T19:22:35,023 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:22:35,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-11-22T19:22:35,025 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
2024-11-22T19:22:35,026 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:22:35,026 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:22:35,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-22T19:22:35,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742017_1193 (size=14794) 2024-11-22T19:22:35,040 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:35,045 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122bc0853040956466b9ceedec19fe68200_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122bc0853040956466b9ceedec19fe68200_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:35,046 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/ac81ba1786bc4d398f9eb1b3db803451, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:35,047 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/ac81ba1786bc4d398f9eb1b3db803451 is 175, key is test_row_0/A:col10/1732303354908/Put/seqid=0 2024-11-22T19:22:35,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742018_1194 (size=39749) 2024-11-22T19:22:35,065 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=171, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/ac81ba1786bc4d398f9eb1b3db803451 2024-11-22T19:22:35,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:35,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303415074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:35,080 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:35,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303415076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:35,083 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/0897412de82749c39cc069e8aeb3bd81 is 50, key is test_row_0/B:col10/1732303354908/Put/seqid=0 2024-11-22T19:22:35,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742019_1195 (size=12151) 2024-11-22T19:22:35,099 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/0897412de82749c39cc069e8aeb3bd81 2024-11-22T19:22:35,114 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/c3754341a81e4778b526f51ff2b512de is 50, key is test_row_0/C:col10/1732303354908/Put/seqid=0 2024-11-22T19:22:35,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742020_1196 (size=12151) 2024-11-22T19:22:35,124 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/c3754341a81e4778b526f51ff2b512de 2024-11-22T19:22:35,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-22T19:22:35,132 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/ac81ba1786bc4d398f9eb1b3db803451 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/ac81ba1786bc4d398f9eb1b3db803451 2024-11-22T19:22:35,137 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/ac81ba1786bc4d398f9eb1b3db803451, entries=200, sequenceid=171, filesize=38.8 K 2024-11-22T19:22:35,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/0897412de82749c39cc069e8aeb3bd81 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/0897412de82749c39cc069e8aeb3bd81 2024-11-22T19:22:35,144 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/0897412de82749c39cc069e8aeb3bd81, entries=150, sequenceid=171, filesize=11.9 K 2024-11-22T19:22:35,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/c3754341a81e4778b526f51ff2b512de as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/c3754341a81e4778b526f51ff2b512de 2024-11-22T19:22:35,153 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/c3754341a81e4778b526f51ff2b512de, entries=150, sequenceid=171, filesize=11.9 K 2024-11-22T19:22:35,154 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 7de534ec4ea5964284edbc5ae1079040 in 141ms, sequenceid=171, compaction requested=true 2024-11-22T19:22:35,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:35,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:35,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:35,154 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:22:35,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:35,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:35,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:22:35,154 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T19:22:35,155 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:22:35,157 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133104 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:22:35,157 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/A is initiating minor compaction (all files) 2024-11-22T19:22:35,157 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/A in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:35,157 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/b19cd3f9da9c4872a94a5030e9f91e75, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/9e42beea5cad4f4a9288dd87a9daad10, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/73481d5f42c949dca590f9ee7e0a2bd1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/ac81ba1786bc4d398f9eb1b3db803451] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=130.0 K 2024-11-22T19:22:35,157 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:35,157 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
files: [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/b19cd3f9da9c4872a94a5030e9f91e75, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/9e42beea5cad4f4a9288dd87a9daad10, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/73481d5f42c949dca590f9ee7e0a2bd1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/ac81ba1786bc4d398f9eb1b3db803451] 2024-11-22T19:22:35,158 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48644 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:22:35,158 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/B is initiating minor compaction (all files) 2024-11-22T19:22:35,158 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/B in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:35,158 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/14ce0edb2b824882b35c646cfd4499f5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/8d6a1e42ed3045e281c8582f8386083a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/c5ae991650424ef48e33bb8c6809b335, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/0897412de82749c39cc069e8aeb3bd81] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=47.5 K 2024-11-22T19:22:35,158 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b19cd3f9da9c4872a94a5030e9f91e75, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732303352034 2024-11-22T19:22:35,159 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 14ce0edb2b824882b35c646cfd4499f5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732303352034 2024-11-22T19:22:35,159 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e42beea5cad4f4a9288dd87a9daad10, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732303353160 2024-11-22T19:22:35,159 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d6a1e42ed3045e281c8582f8386083a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732303353160 2024-11-22T19:22:35,159 
DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 73481d5f42c949dca590f9ee7e0a2bd1, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732303354276 2024-11-22T19:22:35,160 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting c5ae991650424ef48e33bb8c6809b335, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732303354276 2024-11-22T19:22:35,160 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac81ba1786bc4d398f9eb1b3db803451, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732303354901 2024-11-22T19:22:35,161 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 0897412de82749c39cc069e8aeb3bd81, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732303354905 2024-11-22T19:22:35,176 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#B#compaction#168 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:35,177 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/668c9ee44e144dcb80eb739eb1e6858f is 50, key is test_row_0/B:col10/1732303354908/Put/seqid=0 2024-11-22T19:22:35,179 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:35,179 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:35,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:35,180 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-22T19:22:35,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:35,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:35,181 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T19:22:35,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
2024-11-22T19:22:35,181 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:35,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:35,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:35,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:35,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:35,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:35,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:35,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:35,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:35,194 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122b6f367489a2b49fb8b9aeab9d4ea4bfe_7de534ec4ea5964284edbc5ae1079040 store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:35,197 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122b6f367489a2b49fb8b9aeab9d4ea4bfe_7de534ec4ea5964284edbc5ae1079040, store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:35,197 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122b6f367489a2b49fb8b9aeab9d4ea4bfe_7de534ec4ea5964284edbc5ae1079040 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:35,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:35,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303415205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:35,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:35,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303415206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:35,223 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122dd084e10123a4b4b87acc610e7a4e01d_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303355071/Put/seqid=0 2024-11-22T19:22:35,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742021_1197 (size=12527) 2024-11-22T19:22:35,237 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/668c9ee44e144dcb80eb739eb1e6858f as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/668c9ee44e144dcb80eb739eb1e6858f 2024-11-22T19:22:35,245 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/B of 7de534ec4ea5964284edbc5ae1079040 into 668c9ee44e144dcb80eb739eb1e6858f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:35,245 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:35,245 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/B, priority=12, startTime=1732303355154; duration=0sec 2024-11-22T19:22:35,246 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:35,246 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:B 2024-11-22T19:22:35,246 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:22:35,248 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48644 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:22:35,249 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/C is initiating minor compaction (all files) 2024-11-22T19:22:35,249 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/C in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:35,249 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/67c08d5d7baa4ca5b88186783c87d2a0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0e5d541d4285432fb1b69513f42f9963, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/ffa3f9843fb14df49d8f45866ca9debe, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/c3754341a81e4778b526f51ff2b512de] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=47.5 K 2024-11-22T19:22:35,249 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 67c08d5d7baa4ca5b88186783c87d2a0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732303352034 2024-11-22T19:22:35,250 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e5d541d4285432fb1b69513f42f9963, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732303353160 2024-11-22T19:22:35,250 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting ffa3f9843fb14df49d8f45866ca9debe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=159, earliestPutTs=1732303354276 2024-11-22T19:22:35,251 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting c3754341a81e4778b526f51ff2b512de, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732303354905 2024-11-22T19:22:35,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742022_1198 (size=4469) 2024-11-22T19:22:35,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742023_1199 (size=14794) 2024-11-22T19:22:35,273 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#A#compaction#169 average throughput is 0.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:35,273 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/5482a00a62e14cfd8474ad19b71e7cd5 is 175, key is test_row_0/A:col10/1732303354908/Put/seqid=0 2024-11-22T19:22:35,278 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#C#compaction#171 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:35,278 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/d9f8154f795043a3bba02c8e28a1c498 is 50, key is test_row_0/C:col10/1732303354908/Put/seqid=0 2024-11-22T19:22:35,290 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:35,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303415289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:35,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:35,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303415292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:35,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:35,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303415292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:35,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742025_1201 (size=12527) 2024-11-22T19:22:35,305 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/d9f8154f795043a3bba02c8e28a1c498 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/d9f8154f795043a3bba02c8e28a1c498 2024-11-22T19:22:35,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742024_1200 (size=31481) 2024-11-22T19:22:35,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:35,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303415310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:35,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:35,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303415311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:35,320 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/5482a00a62e14cfd8474ad19b71e7cd5 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/5482a00a62e14cfd8474ad19b71e7cd5 2024-11-22T19:22:35,325 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/C of 7de534ec4ea5964284edbc5ae1079040 into d9f8154f795043a3bba02c8e28a1c498(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:22:35,325 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:35,325 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/C, priority=12, startTime=1732303355154; duration=0sec 2024-11-22T19:22:35,326 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:35,326 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:C 2024-11-22T19:22:35,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-22T19:22:35,331 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/A of 7de534ec4ea5964284edbc5ae1079040 into 5482a00a62e14cfd8474ad19b71e7cd5(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:35,331 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:35,331 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/A, priority=12, startTime=1732303355154; duration=0sec 2024-11-22T19:22:35,332 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:35,332 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:A 2024-11-22T19:22:35,334 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:35,334 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-22T19:22:35,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:35,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:35,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:35,335 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
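The repeated RegionTooBusyException entries above show HRegion.checkResources rejecting writes once the region's memstore passes its 512 K blocking limit, while the flush that would drain it is still in progress. Below is a minimal, illustrative Java sketch of how a writer could back off and retry on that exception; the table, row, and column names are taken from the log, but the retry count, backoff values, and the direct catch of RegionTooBusyException (the HBase client normally retries it internally and may rethrow it wrapped) are assumptions, not part of the test.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
          long backoffMs = 100;                      // assumed starting backoff
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);                        // accepted once the memstore drains
              return;
            } catch (RegionTooBusyException busy) {
              // Region is over its memstore blocking limit; wait for the in-flight
              // flush/compaction to make room, then try again.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
          throw new IOException("gave up after repeated RegionTooBusyException");
        }
      }
    }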
2024-11-22T19:22:35,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:35,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:35,486 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:35,487 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-22T19:22:35,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:35,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:35,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:35,488 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:35,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:35,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:35,515 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:35,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303415514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:35,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:35,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303415514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:35,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-22T19:22:35,640 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:35,641 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-22T19:22:35,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:35,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:35,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:35,642 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
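The "Over memstore limit=512.0 K" figure in the exceptions above is the region's blocking limit; in stock HBase that limit is the memstore flush size multiplied by the block multiplier. How TestAcidGuarantees actually arrives at 512 K is not visible in this excerpt, so the following is only a sketch of the standard knobs, with assumed values.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches 128 K (assumed value) ...
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        // ... and block new writes (RegionTooBusyException) at 4x that, i.e. 512 K.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("blocking limit = " + blockingLimit + " bytes");
      }
    }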
2024-11-22T19:22:35,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:35,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:35,671 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:35,679 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122dd084e10123a4b4b87acc610e7a4e01d_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122dd084e10123a4b4b87acc610e7a4e01d_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:35,680 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/24c2a954cefb44c3b3e2d1dfbf02e16b, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:35,680 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/24c2a954cefb44c3b3e2d1dfbf02e16b is 175, key is test_row_0/A:col10/1732303355071/Put/seqid=0 2024-11-22T19:22:35,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742026_1202 (size=39749) 2024-11-22T19:22:35,795 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:35,795 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-22T19:22:35,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:35,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:35,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:35,796 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:35,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:35,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:35,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:35,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303415817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:35,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:35,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303415818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:35,949 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:35,950 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-22T19:22:35,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
2024-11-22T19:22:35,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:35,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:35,951 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:35,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:35,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:36,100 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=196, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/24c2a954cefb44c3b3e2d1dfbf02e16b 2024-11-22T19:22:36,105 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:36,105 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-22T19:22:36,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
2024-11-22T19:22:36,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:36,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:36,106 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:36,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:36,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
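The pattern repeated above ("NOT flushing ... as already flushing", the "Unable to complete flush" IOException from FlushRegionCallable, and the master's "Remote procedure failed, pid=52" followed by a re-dispatch) is a retry loop: the region server refuses the remote flush while its own flush is in progress, and the master keeps resending the procedure until it can run. The sketch below only mirrors that control flow; it is not the actual HBase source, and the Region interface here is hypothetical.

    import java.io.IOException;

    interface Region {                      // hypothetical stand-in, not an HBase type
      boolean isFlushing();
      void flush() throws IOException;
    }

    class FlushRetrySketch {
      static void flushWhenIdle(Region region, long retryDelayMs) throws InterruptedException {
        while (true) {
          try {
            if (region.isFlushing()) {
              // Mirrors FlushRegionCallable bailing out while a flush is already running.
              throw new IOException("Unable to complete flush: already flushing");
            }
            region.flush();                 // succeeds once the earlier flush has finished
            return;
          } catch (IOException rejected) {
            // Mirrors the master re-dispatching the failed remote procedure (pid=52).
            Thread.sleep(retryDelayMs);
          }
        }
      }
    }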
2024-11-22T19:22:36,132 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/8cfd0af0ab494356b38636c31795d74c is 50, key is test_row_0/B:col10/1732303355071/Put/seqid=0 2024-11-22T19:22:36,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-22T19:22:36,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742027_1203 (size=12151) 2024-11-22T19:22:36,163 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/8cfd0af0ab494356b38636c31795d74c 2024-11-22T19:22:36,178 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/3b2a5a660ea94c089d7d264be69a3123 is 50, key is test_row_0/C:col10/1732303355071/Put/seqid=0 2024-11-22T19:22:36,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742028_1204 (size=12151) 2024-11-22T19:22:36,259 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:36,260 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-22T19:22:36,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:36,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:36,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:36,260 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:36,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:36,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:36,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:36,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303416322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:36,326 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:36,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303416323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:36,413 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:36,414 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-22T19:22:36,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
2024-11-22T19:22:36,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:36,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:36,414 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:36,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:36,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:36,567 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:36,568 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-22T19:22:36,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:36,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
as already flushing 2024-11-22T19:22:36,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:36,568 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:36,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:36,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:36,627 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/3b2a5a660ea94c089d7d264be69a3123 2024-11-22T19:22:36,634 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/24c2a954cefb44c3b3e2d1dfbf02e16b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/24c2a954cefb44c3b3e2d1dfbf02e16b 2024-11-22T19:22:36,639 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/24c2a954cefb44c3b3e2d1dfbf02e16b, entries=200, sequenceid=196, filesize=38.8 K 2024-11-22T19:22:36,641 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/8cfd0af0ab494356b38636c31795d74c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/8cfd0af0ab494356b38636c31795d74c 2024-11-22T19:22:36,646 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/8cfd0af0ab494356b38636c31795d74c, entries=150, sequenceid=196, filesize=11.9 K 2024-11-22T19:22:36,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/3b2a5a660ea94c089d7d264be69a3123 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/3b2a5a660ea94c089d7d264be69a3123 2024-11-22T19:22:36,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/3b2a5a660ea94c089d7d264be69a3123, entries=150, sequenceid=196, filesize=11.9 K 2024-11-22T19:22:36,654 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 7de534ec4ea5964284edbc5ae1079040 in 1474ms, sequenceid=196, compaction requested=false 2024-11-22T19:22:36,654 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:36,720 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:36,721 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-22T19:22:36,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
2024-11-22T19:22:36,722 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T19:22:36,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:36,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:36,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:36,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:36,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:36,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:36,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122531931883dd649558afb469ee76598e4_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303355191/Put/seqid=0 2024-11-22T19:22:36,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742029_1205 (size=12304) 2024-11-22T19:22:36,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:36,794 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122531931883dd649558afb469ee76598e4_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122531931883dd649558afb469ee76598e4_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:36,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/6e31f037d7c04f99a96945dfc1557f59, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:36,797 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/6e31f037d7c04f99a96945dfc1557f59 is 175, key is test_row_0/A:col10/1732303355191/Put/seqid=0 2024-11-22T19:22:36,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742030_1206 (size=31105) 2024-11-22T19:22:36,833 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=210, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/6e31f037d7c04f99a96945dfc1557f59 2024-11-22T19:22:36,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/07543b89e57449deb0a73920e2180d72 is 50, key is test_row_0/B:col10/1732303355191/Put/seqid=0 2024-11-22T19:22:36,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742031_1207 (size=12151) 2024-11-22T19:22:36,900 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/07543b89e57449deb0a73920e2180d72 2024-11-22T19:22:36,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/f16f40ce771c4c878a8a9aabc4175394 is 50, key is test_row_0/C:col10/1732303355191/Put/seqid=0 2024-11-22T19:22:36,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742032_1208 (size=12151) 2024-11-22T19:22:37,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-22T19:22:37,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
as already flushing 2024-11-22T19:22:37,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:37,366 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/f16f40ce771c4c878a8a9aabc4175394 2024-11-22T19:22:37,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:37,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303417360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:37,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:37,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303417362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:37,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:37,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303417373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:37,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:37,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303417373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:37,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/6e31f037d7c04f99a96945dfc1557f59 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/6e31f037d7c04f99a96945dfc1557f59 2024-11-22T19:22:37,380 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:37,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303417378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:37,387 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/6e31f037d7c04f99a96945dfc1557f59, entries=150, sequenceid=210, filesize=30.4 K 2024-11-22T19:22:37,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/07543b89e57449deb0a73920e2180d72 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/07543b89e57449deb0a73920e2180d72 2024-11-22T19:22:37,394 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/07543b89e57449deb0a73920e2180d72, entries=150, sequenceid=210, filesize=11.9 K 2024-11-22T19:22:37,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/f16f40ce771c4c878a8a9aabc4175394 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/f16f40ce771c4c878a8a9aabc4175394 2024-11-22T19:22:37,401 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/f16f40ce771c4c878a8a9aabc4175394, entries=150, sequenceid=210, filesize=11.9 K 2024-11-22T19:22:37,403 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 7de534ec4ea5964284edbc5ae1079040 in 680ms, sequenceid=210, 
compaction requested=true 2024-11-22T19:22:37,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:37,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:37,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-22T19:22:37,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-22T19:22:37,406 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-22T19:22:37,406 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3790 sec 2024-11-22T19:22:37,410 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 2.3850 sec 2024-11-22T19:22:37,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:37,480 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-22T19:22:37,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:37,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:37,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:37,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:37,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:37,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:37,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:37,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303417497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:37,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:37,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303417497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:37,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:37,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303417507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:37,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:37,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303417508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:37,519 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122451989672b1b4d779c6e999dfec056e5_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303357360/Put/seqid=0 2024-11-22T19:22:37,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:37,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303417509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:37,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742033_1209 (size=12304) 2024-11-22T19:22:37,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:37,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303417611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:37,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:37,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303417610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:37,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:37,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303417613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:37,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:37,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303417613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:37,624 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:37,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303417622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:37,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:37,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303417817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:37,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:37,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303417817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:37,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:37,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303417818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:37,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:37,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303417818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:37,829 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:37,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303417828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:37,955 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:37,967 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122451989672b1b4d779c6e999dfec056e5_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122451989672b1b4d779c6e999dfec056e5_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:37,968 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/7a2fd55cd96d40e9a910b55b09b14a5a, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:37,969 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/7a2fd55cd96d40e9a910b55b09b14a5a is 175, key is test_row_0/A:col10/1732303357360/Put/seqid=0 2024-11-22T19:22:38,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742034_1210 (size=31105) 2024-11-22T19:22:38,044 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=238, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/7a2fd55cd96d40e9a910b55b09b14a5a 2024-11-22T19:22:38,076 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/6175e4265bcb49f293a8546553150c95 is 50, key is test_row_0/B:col10/1732303357360/Put/seqid=0 2024-11-22T19:22:38,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742035_1211 
(size=12151) 2024-11-22T19:22:38,118 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/6175e4265bcb49f293a8546553150c95 2024-11-22T19:22:38,122 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:38,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303418121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:38,123 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:38,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303418122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:38,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:38,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303418123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:38,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:38,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303418124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:38,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:38,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303418141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:38,146 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/cb4095c8eea0432a8e55dd05b467fe0d is 50, key is test_row_0/C:col10/1732303357360/Put/seqid=0 2024-11-22T19:22:38,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742036_1212 (size=12151) 2024-11-22T19:22:38,201 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/cb4095c8eea0432a8e55dd05b467fe0d 2024-11-22T19:22:38,206 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/7a2fd55cd96d40e9a910b55b09b14a5a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/7a2fd55cd96d40e9a910b55b09b14a5a 2024-11-22T19:22:38,211 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/7a2fd55cd96d40e9a910b55b09b14a5a, entries=150, sequenceid=238, filesize=30.4 K 2024-11-22T19:22:38,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/6175e4265bcb49f293a8546553150c95 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/6175e4265bcb49f293a8546553150c95 2024-11-22T19:22:38,217 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/6175e4265bcb49f293a8546553150c95, entries=150, sequenceid=238, filesize=11.9 K 2024-11-22T19:22:38,218 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/cb4095c8eea0432a8e55dd05b467fe0d as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/cb4095c8eea0432a8e55dd05b467fe0d 2024-11-22T19:22:38,223 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/cb4095c8eea0432a8e55dd05b467fe0d, entries=150, sequenceid=238, filesize=11.9 K 2024-11-22T19:22:38,225 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 7de534ec4ea5964284edbc5ae1079040 in 745ms, sequenceid=238, compaction requested=true 2024-11-22T19:22:38,225 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:38,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:38,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:38,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:38,225 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:22:38,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:38,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:22:38,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T19:22:38,225 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:22:38,227 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48980 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:22:38,227 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133440 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:22:38,227 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/B is initiating minor compaction (all files) 2024-11-22T19:22:38,227 DEBUG 
[RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/A is initiating minor compaction (all files) 2024-11-22T19:22:38,227 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/B in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:38,227 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/A in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:38,228 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/668c9ee44e144dcb80eb739eb1e6858f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/8cfd0af0ab494356b38636c31795d74c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/07543b89e57449deb0a73920e2180d72, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/6175e4265bcb49f293a8546553150c95] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=47.8 K 2024-11-22T19:22:38,228 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/5482a00a62e14cfd8474ad19b71e7cd5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/24c2a954cefb44c3b3e2d1dfbf02e16b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/6e31f037d7c04f99a96945dfc1557f59, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/7a2fd55cd96d40e9a910b55b09b14a5a] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=130.3 K 2024-11-22T19:22:38,228 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:38,228 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
files: [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/5482a00a62e14cfd8474ad19b71e7cd5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/24c2a954cefb44c3b3e2d1dfbf02e16b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/6e31f037d7c04f99a96945dfc1557f59, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/7a2fd55cd96d40e9a910b55b09b14a5a] 2024-11-22T19:22:38,228 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 668c9ee44e144dcb80eb739eb1e6858f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732303354905 2024-11-22T19:22:38,228 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5482a00a62e14cfd8474ad19b71e7cd5, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732303354905 2024-11-22T19:22:38,229 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 8cfd0af0ab494356b38636c31795d74c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732303355071 2024-11-22T19:22:38,229 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 24c2a954cefb44c3b3e2d1dfbf02e16b, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732303355069 2024-11-22T19:22:38,229 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 07543b89e57449deb0a73920e2180d72, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732303355191 2024-11-22T19:22:38,229 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e31f037d7c04f99a96945dfc1557f59, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732303355191 2024-11-22T19:22:38,230 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 6175e4265bcb49f293a8546553150c95, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732303357360 2024-11-22T19:22:38,230 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a2fd55cd96d40e9a910b55b09b14a5a, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732303357360 2024-11-22T19:22:38,250 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#B#compaction#180 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:38,250 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/f5d2cf31e9314d2f83e7c3fdd2e18ce3 is 50, key is test_row_0/B:col10/1732303357360/Put/seqid=0 2024-11-22T19:22:38,260 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:38,276 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122cbfcc076afaf4189bdbcecc7bf5e2501_7de534ec4ea5964284edbc5ae1079040 store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:38,278 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122cbfcc076afaf4189bdbcecc7bf5e2501_7de534ec4ea5964284edbc5ae1079040, store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:38,279 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122cbfcc076afaf4189bdbcecc7bf5e2501_7de534ec4ea5964284edbc5ae1079040 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:38,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742037_1213 (size=12663) 2024-11-22T19:22:38,302 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/f5d2cf31e9314d2f83e7c3fdd2e18ce3 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/f5d2cf31e9314d2f83e7c3fdd2e18ce3 2024-11-22T19:22:38,309 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/B of 7de534ec4ea5964284edbc5ae1079040 into f5d2cf31e9314d2f83e7c3fdd2e18ce3(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:38,309 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:38,309 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/B, priority=12, startTime=1732303358225; duration=0sec 2024-11-22T19:22:38,309 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:38,309 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:B 2024-11-22T19:22:38,309 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:22:38,310 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48980 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:22:38,311 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/C is initiating minor compaction (all files) 2024-11-22T19:22:38,311 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/C in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:38,311 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/d9f8154f795043a3bba02c8e28a1c498, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/3b2a5a660ea94c089d7d264be69a3123, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/f16f40ce771c4c878a8a9aabc4175394, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/cb4095c8eea0432a8e55dd05b467fe0d] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=47.8 K 2024-11-22T19:22:38,311 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting d9f8154f795043a3bba02c8e28a1c498, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732303354905 2024-11-22T19:22:38,312 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b2a5a660ea94c089d7d264be69a3123, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732303355071 2024-11-22T19:22:38,312 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting f16f40ce771c4c878a8a9aabc4175394, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=210, earliestPutTs=1732303355191 2024-11-22T19:22:38,313 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting cb4095c8eea0432a8e55dd05b467fe0d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732303357360 2024-11-22T19:22:38,333 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#C#compaction#182 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:38,334 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/bdfe693bd15d4f9d8028f454740602d2 is 50, key is test_row_0/C:col10/1732303357360/Put/seqid=0 2024-11-22T19:22:38,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742038_1214 (size=4469) 2024-11-22T19:22:38,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742039_1215 (size=12663) 2024-11-22T19:22:38,381 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/bdfe693bd15d4f9d8028f454740602d2 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/bdfe693bd15d4f9d8028f454740602d2 2024-11-22T19:22:38,395 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/C of 7de534ec4ea5964284edbc5ae1079040 into bdfe693bd15d4f9d8028f454740602d2(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:38,395 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:38,395 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/C, priority=12, startTime=1732303358225; duration=0sec 2024-11-22T19:22:38,395 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:38,395 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:C 2024-11-22T19:22:38,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:38,636 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-22T19:22:38,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:38,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:38,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:38,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:38,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:38,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:38,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:38,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303418670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:38,679 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122d7b0c02328464bd4b67e2715afddcd1b_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303357496/Put/seqid=0 2024-11-22T19:22:38,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:38,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303418672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:38,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:38,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303418674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:38,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:38,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303418678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:38,684 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:38,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303418679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:38,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742040_1216 (size=12304) 2024-11-22T19:22:38,735 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#A#compaction#181 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:38,735 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/2e6c95bad4b04a769057680403acab6b is 175, key is test_row_0/A:col10/1732303357360/Put/seqid=0 2024-11-22T19:22:38,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742041_1217 (size=31617) 2024-11-22T19:22:38,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:38,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303418780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:38,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:38,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303418782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:38,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:38,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303418784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:38,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:38,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303418785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:38,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:38,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303418786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:38,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:38,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303418984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:38,987 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:38,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303418985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:38,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:38,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303418992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:38,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:38,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303418992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:38,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:38,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303418992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:39,113 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:39,121 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122d7b0c02328464bd4b67e2715afddcd1b_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122d7b0c02328464bd4b67e2715afddcd1b_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:39,124 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/f8d66935835e4ac89cfd9d8e68f3c9c0, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:39,125 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/f8d66935835e4ac89cfd9d8e68f3c9c0 is 175, key is test_row_0/A:col10/1732303357496/Put/seqid=0 2024-11-22T19:22:39,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-22T19:22:39,142 INFO [Thread-740 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-22T19:22:39,144 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:22:39,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-22T19:22:39,147 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:22:39,148 INFO 
[PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:22:39,148 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:22:39,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-22T19:22:39,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742042_1218 (size=31105) 2024-11-22T19:22:39,160 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=253, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/f8d66935835e4ac89cfd9d8e68f3c9c0 2024-11-22T19:22:39,161 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/2e6c95bad4b04a769057680403acab6b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/2e6c95bad4b04a769057680403acab6b 2024-11-22T19:22:39,186 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/A of 7de534ec4ea5964284edbc5ae1079040 into 2e6c95bad4b04a769057680403acab6b(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:39,186 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:39,186 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/A, priority=12, startTime=1732303358225; duration=0sec 2024-11-22T19:22:39,186 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:39,186 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:A 2024-11-22T19:22:39,208 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/33505ea5915f4e08b4a5f9be4425a217 is 50, key is test_row_0/B:col10/1732303357496/Put/seqid=0 2024-11-22T19:22:39,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-22T19:22:39,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742043_1219 (size=12151) 2024-11-22T19:22:39,292 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:39,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:39,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303419291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:39,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303419291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:39,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:39,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303419294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:39,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:39,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303419297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:39,300 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:39,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303419297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:39,302 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:39,303 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-22T19:22:39,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:39,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:39,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:39,304 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:39,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:39,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:39,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-22T19:22:39,456 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:39,457 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-22T19:22:39,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:39,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:39,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:39,457 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:39,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:39,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:39,610 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:39,610 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-22T19:22:39,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:39,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:39,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:39,611 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:39,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:39,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:39,655 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/33505ea5915f4e08b4a5f9be4425a217 2024-11-22T19:22:39,669 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/581c741a40e74d8cbf4139684a778367 is 50, key is test_row_0/C:col10/1732303357496/Put/seqid=0 2024-11-22T19:22:39,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742044_1220 (size=12151) 2024-11-22T19:22:39,694 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/581c741a40e74d8cbf4139684a778367 2024-11-22T19:22:39,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/f8d66935835e4ac89cfd9d8e68f3c9c0 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/f8d66935835e4ac89cfd9d8e68f3c9c0 2024-11-22T19:22:39,718 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/f8d66935835e4ac89cfd9d8e68f3c9c0, entries=150, sequenceid=253, filesize=30.4 K 2024-11-22T19:22:39,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/33505ea5915f4e08b4a5f9be4425a217 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/33505ea5915f4e08b4a5f9be4425a217 2024-11-22T19:22:39,734 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/33505ea5915f4e08b4a5f9be4425a217, entries=150, sequenceid=253, filesize=11.9 K 2024-11-22T19:22:39,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/581c741a40e74d8cbf4139684a778367 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/581c741a40e74d8cbf4139684a778367 2024-11-22T19:22:39,748 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/581c741a40e74d8cbf4139684a778367, entries=150, sequenceid=253, filesize=11.9 K 2024-11-22T19:22:39,749 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 7de534ec4ea5964284edbc5ae1079040 in 1113ms, sequenceid=253, compaction requested=false 2024-11-22T19:22:39,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:39,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-22T19:22:39,764 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:39,764 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-22T19:22:39,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:39,765 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-22T19:22:39,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:39,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:39,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:39,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:39,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:39,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:39,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112287012928142f462c8ed707bfcbf27dd1_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303358672/Put/seqid=0 2024-11-22T19:22:39,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 
7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:39,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:39,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742045_1221 (size=12454) 2024-11-22T19:22:39,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:39,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:39,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303419860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:39,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303419861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:39,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:39,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303419870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:39,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:39,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303419872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:39,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:39,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303419870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:39,906 INFO [master/a307a1377457:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-22T19:22:39,906 INFO [master/a307a1377457:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-22T19:22:39,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:39,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303419973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:39,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:39,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303419973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:39,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:39,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303419980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:39,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:39,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303419986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:39,993 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:39,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303419990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:40,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:40,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303420177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:40,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:40,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303420178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:40,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:40,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303420186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:40,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:40,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303420189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:40,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:40,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303420194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:40,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-22T19:22:40,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:40,269 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112287012928142f462c8ed707bfcbf27dd1_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112287012928142f462c8ed707bfcbf27dd1_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:40,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/146df1bc568d4f81b3ce544c0942be3b, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:40,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/146df1bc568d4f81b3ce544c0942be3b is 175, key is test_row_0/A:col10/1732303358672/Put/seqid=0 2024-11-22T19:22:40,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742046_1222 (size=31255) 2024-11-22T19:22:40,487 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:40,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303420484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:40,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:40,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303420485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:40,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:40,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303420494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:40,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:40,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303420497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:40,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:40,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303420499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:40,695 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=277, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/146df1bc568d4f81b3ce544c0942be3b 2024-11-22T19:22:40,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/e68952eb4a2244bcbc99c5a0faf6e0de is 50, key is test_row_0/B:col10/1732303358672/Put/seqid=0 2024-11-22T19:22:40,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742047_1223 (size=12301) 2024-11-22T19:22:40,746 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/e68952eb4a2244bcbc99c5a0faf6e0de 2024-11-22T19:22:40,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/ddd6b1b6140c4057b08b84ba5bb09cac is 50, key is test_row_0/C:col10/1732303358672/Put/seqid=0 2024-11-22T19:22:40,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742048_1224 (size=12301) 2024-11-22T19:22:40,829 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/ddd6b1b6140c4057b08b84ba5bb09cac 2024-11-22T19:22:40,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/146df1bc568d4f81b3ce544c0942be3b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/146df1bc568d4f81b3ce544c0942be3b 2024-11-22T19:22:40,844 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/146df1bc568d4f81b3ce544c0942be3b, entries=150, sequenceid=277, filesize=30.5 K 2024-11-22T19:22:40,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/e68952eb4a2244bcbc99c5a0faf6e0de as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/e68952eb4a2244bcbc99c5a0faf6e0de 2024-11-22T19:22:40,854 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/e68952eb4a2244bcbc99c5a0faf6e0de, entries=150, sequenceid=277, filesize=12.0 K 2024-11-22T19:22:40,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/ddd6b1b6140c4057b08b84ba5bb09cac as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/ddd6b1b6140c4057b08b84ba5bb09cac 2024-11-22T19:22:40,863 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/ddd6b1b6140c4057b08b84ba5bb09cac, entries=150, sequenceid=277, filesize=12.0 K 2024-11-22T19:22:40,866 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 7de534ec4ea5964284edbc5ae1079040 in 1101ms, sequenceid=277, compaction requested=true 2024-11-22T19:22:40,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:40,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
2024-11-22T19:22:40,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-22T19:22:40,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-22T19:22:40,870 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-22T19:22:40,870 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7210 sec 2024-11-22T19:22:40,873 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 1.7260 sec 2024-11-22T19:22:40,997 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-22T19:22:40,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:41,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:41,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:41,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:41,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:41,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:41,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:41,025 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411222ad06c0ec40141109122a13a453389db_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303360996/Put/seqid=0 2024-11-22T19:22:41,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303421027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303421027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,038 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303421029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,038 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303421030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303421036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742049_1225 (size=12454) 2024-11-22T19:22:41,067 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:41,077 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411222ad06c0ec40141109122a13a453389db_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411222ad06c0ec40141109122a13a453389db_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:41,078 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/a863583632ee44a1b8cc343cbbb927ef, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:41,079 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/a863583632ee44a1b8cc343cbbb927ef is 175, key is test_row_0/A:col10/1732303360996/Put/seqid=0 2024-11-22T19:22:41,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 
is added to blk_1073742050_1226 (size=31255) 2024-11-22T19:22:41,115 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=293, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/a863583632ee44a1b8cc343cbbb927ef 2024-11-22T19:22:41,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303421138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303421139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/4bc1a8dc70bd4f9f8c1abad7991bb97e is 50, key is test_row_0/B:col10/1732303360996/Put/seqid=0 2024-11-22T19:22:41,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303421139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303421140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303421141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742051_1227 (size=12301) 2024-11-22T19:22:41,165 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/4bc1a8dc70bd4f9f8c1abad7991bb97e 2024-11-22T19:22:41,175 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/6abe85f18dae4d35987672cca9a61ead is 50, key is test_row_0/C:col10/1732303360996/Put/seqid=0 2024-11-22T19:22:41,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742052_1228 (size=12301) 2024-11-22T19:22:41,214 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/6abe85f18dae4d35987672cca9a61ead 2024-11-22T19:22:41,219 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/a863583632ee44a1b8cc343cbbb927ef as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/a863583632ee44a1b8cc343cbbb927ef 2024-11-22T19:22:41,226 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/a863583632ee44a1b8cc343cbbb927ef, entries=150, sequenceid=293, filesize=30.5 K 2024-11-22T19:22:41,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/4bc1a8dc70bd4f9f8c1abad7991bb97e as 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/4bc1a8dc70bd4f9f8c1abad7991bb97e 2024-11-22T19:22:41,234 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/4bc1a8dc70bd4f9f8c1abad7991bb97e, entries=150, sequenceid=293, filesize=12.0 K 2024-11-22T19:22:41,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/6abe85f18dae4d35987672cca9a61ead as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/6abe85f18dae4d35987672cca9a61ead 2024-11-22T19:22:41,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/6abe85f18dae4d35987672cca9a61ead, entries=150, sequenceid=293, filesize=12.0 K 2024-11-22T19:22:41,243 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 7de534ec4ea5964284edbc5ae1079040 in 246ms, sequenceid=293, compaction requested=true 2024-11-22T19:22:41,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:41,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:41,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:41,243 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:22:41,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:41,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:41,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:22:41,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T19:22:41,243 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:22:41,246 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 4 files of size 125232 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:22:41,246 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/A is initiating minor compaction (all files) 2024-11-22T19:22:41,246 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/A in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:41,246 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/2e6c95bad4b04a769057680403acab6b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/f8d66935835e4ac89cfd9d8e68f3c9c0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/146df1bc568d4f81b3ce544c0942be3b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/a863583632ee44a1b8cc343cbbb927ef] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=122.3 K 2024-11-22T19:22:41,246 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:41,246 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
files: [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/2e6c95bad4b04a769057680403acab6b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/f8d66935835e4ac89cfd9d8e68f3c9c0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/146df1bc568d4f81b3ce544c0942be3b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/a863583632ee44a1b8cc343cbbb927ef] 2024-11-22T19:22:41,247 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2e6c95bad4b04a769057680403acab6b, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732303357360 2024-11-22T19:22:41,248 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49416 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:22:41,248 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/B is initiating minor compaction (all files) 2024-11-22T19:22:41,248 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/B in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:41,248 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/f5d2cf31e9314d2f83e7c3fdd2e18ce3, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/33505ea5915f4e08b4a5f9be4425a217, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/e68952eb4a2244bcbc99c5a0faf6e0de, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/4bc1a8dc70bd4f9f8c1abad7991bb97e] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=48.3 K 2024-11-22T19:22:41,248 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f8d66935835e4ac89cfd9d8e68f3c9c0, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732303357496 2024-11-22T19:22:41,248 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting f5d2cf31e9314d2f83e7c3fdd2e18ce3, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732303357360 2024-11-22T19:22:41,250 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 146df1bc568d4f81b3ce544c0942be3b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732303358672 2024-11-22T19:22:41,250 
DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 33505ea5915f4e08b4a5f9be4425a217, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732303357496 2024-11-22T19:22:41,250 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting e68952eb4a2244bcbc99c5a0faf6e0de, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732303358672 2024-11-22T19:22:41,250 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting a863583632ee44a1b8cc343cbbb927ef, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732303359868 2024-11-22T19:22:41,251 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 4bc1a8dc70bd4f9f8c1abad7991bb97e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732303359868 2024-11-22T19:22:41,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-22T19:22:41,256 INFO [Thread-740 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-22T19:22:41,260 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:22:41,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-22T19:22:41,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-22T19:22:41,263 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:22:41,264 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:22:41,264 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:22:41,268 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#B#compaction#192 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:41,269 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/4375a22ea1104a6bba1d139278de201a is 50, key is test_row_0/B:col10/1732303360996/Put/seqid=0 2024-11-22T19:22:41,283 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:41,301 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122eb7a05146eeb4eaeb7001e8651d4035c_7de534ec4ea5964284edbc5ae1079040 store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:41,303 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122eb7a05146eeb4eaeb7001e8651d4035c_7de534ec4ea5964284edbc5ae1079040, store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:41,303 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122eb7a05146eeb4eaeb7001e8651d4035c_7de534ec4ea5964284edbc5ae1079040 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:41,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742053_1229 (size=12949) 2024-11-22T19:22:41,335 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/4375a22ea1104a6bba1d139278de201a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/4375a22ea1104a6bba1d139278de201a 2024-11-22T19:22:41,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:41,344 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-22T19:22:41,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:41,345 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/B of 7de534ec4ea5964284edbc5ae1079040 into 4375a22ea1104a6bba1d139278de201a(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:41,345 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:41,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:41,345 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/B, priority=12, startTime=1732303361243; duration=0sec 2024-11-22T19:22:41,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:41,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:41,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:41,345 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:41,345 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:B 2024-11-22T19:22:41,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:41,345 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:22:41,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742054_1230 (size=4469) 2024-11-22T19:22:41,350 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49416 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:22:41,350 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/C is initiating minor compaction (all files) 2024-11-22T19:22:41,350 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/C in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
2024-11-22T19:22:41,350 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/bdfe693bd15d4f9d8028f454740602d2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/581c741a40e74d8cbf4139684a778367, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/ddd6b1b6140c4057b08b84ba5bb09cac, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/6abe85f18dae4d35987672cca9a61ead] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=48.3 K 2024-11-22T19:22:41,351 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting bdfe693bd15d4f9d8028f454740602d2, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732303357360 2024-11-22T19:22:41,351 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 581c741a40e74d8cbf4139684a778367, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732303357496 2024-11-22T19:22:41,352 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting ddd6b1b6140c4057b08b84ba5bb09cac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732303358672 2024-11-22T19:22:41,353 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 6abe85f18dae4d35987672cca9a61ead, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732303359868 2024-11-22T19:22:41,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-22T19:22:41,370 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122e91f531396344cc5af7e129a78e9777d_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303361342/Put/seqid=0 2024-11-22T19:22:41,384 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#C#compaction#195 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:41,384 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/e6dc0c95e02449549ba29b1e522d4bdf is 50, key is test_row_0/C:col10/1732303360996/Put/seqid=0 2024-11-22T19:22:41,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303421390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303421391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303421392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303421392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303421391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,416 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:41,417 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-22T19:22:41,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:41,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:41,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:41,417 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:41,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:41,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:41,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742055_1231 (size=14994) 2024-11-22T19:22:41,423 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:41,431 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122e91f531396344cc5af7e129a78e9777d_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122e91f531396344cc5af7e129a78e9777d_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:41,433 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/58017801bf4443ec8f7f13f65d33aa55, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:41,433 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/58017801bf4443ec8f7f13f65d33aa55 is 175, key is test_row_0/A:col10/1732303361342/Put/seqid=0 2024-11-22T19:22:41,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742056_1232 (size=12949) 2024-11-22T19:22:41,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742057_1233 (size=39949) 2024-11-22T19:22:41,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303421502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303421502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303421502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303421502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,511 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303421503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-22T19:22:41,570 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:41,571 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-22T19:22:41,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:41,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:41,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:41,572 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:41,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:41,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:41,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303421711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303421712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303421712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303421712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:41,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303421713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:41,724 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:41,725 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-22T19:22:41,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:41,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:41,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:41,725 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:41,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:41,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:41,751 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#A#compaction#193 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:41,752 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/db02aca7584e461cb242539fc5835a1c is 175, key is test_row_0/A:col10/1732303360996/Put/seqid=0 2024-11-22T19:22:41,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742058_1234 (size=31903) 2024-11-22T19:22:41,853 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/e6dc0c95e02449549ba29b1e522d4bdf as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/e6dc0c95e02449549ba29b1e522d4bdf 2024-11-22T19:22:41,862 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/C of 7de534ec4ea5964284edbc5ae1079040 into e6dc0c95e02449549ba29b1e522d4bdf(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
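The repeated RegionTooBusyException entries above and below come from HRegion.checkResources rejecting writes while the region's memstore sits above its blocking limit (512.0 K here); writes are accepted again once the in-flight flush and compactions drain the memstore. The blocking limit is the memstore flush size times the block multiplier, so a limit this small suggests the test runs with a deliberately tiny flush size. A minimal Java sketch follows (not part of the test code; the table name is taken from the log, while the configuration values, row, and retry policy are assumptions) showing the two settings involved and a client-side backoff loop for writes that hit this condition.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MemstoreBackpressureSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed values, chosen only to illustrate how a ~512 KB blocking limit can arise:
        // blocking limit = memstore flush size * block multiplier (defaults are 128 MB and 4).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
          long backoffMs = 100;
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              break; // write accepted
            } catch (IOException e) {
              // A blocked region surfaces as RegionTooBusyException, possibly wrapped by
              // the client's own retry machinery; back off and try again.
              if (!(e instanceof RegionTooBusyException)
                  && !(e.getCause() instanceof RegionTooBusyException)) {
                throw e;
              }
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }

The exponential backoff mirrors what the HBase client's own retry machinery already does; the explicit loop is only there to make the failure mode reported in these log entries visible.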
2024-11-22T19:22:41,862 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:41,862 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/C, priority=12, startTime=1732303361243; duration=0sec 2024-11-22T19:22:41,862 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:41,862 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:C 2024-11-22T19:22:41,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-22T19:22:41,877 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=315, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/58017801bf4443ec8f7f13f65d33aa55 2024-11-22T19:22:41,878 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:41,879 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-22T19:22:41,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:41,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:41,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:41,880 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:41,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:41,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:41,906 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/6b730175e94a4df79f6bb2e0ce4d6b00 is 50, key is test_row_0/B:col10/1732303361342/Put/seqid=0 2024-11-22T19:22:41,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742059_1235 (size=12301) 2024-11-22T19:22:41,934 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/6b730175e94a4df79f6bb2e0ce4d6b00 2024-11-22T19:22:41,948 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/cc94112ec4cd4ba2bdbbd7a986fb29f9 is 50, key is test_row_0/C:col10/1732303361342/Put/seqid=0 2024-11-22T19:22:41,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742060_1236 (size=12301) 2024-11-22T19:22:42,016 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:42,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303422014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:42,016 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:42,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303422015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:42,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:42,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303422017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:42,020 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:42,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303422018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:42,021 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:42,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303422019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:42,034 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:42,035 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-22T19:22:42,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:42,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:42,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:42,035 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:42,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:42,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:42,180 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/db02aca7584e461cb242539fc5835a1c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/db02aca7584e461cb242539fc5835a1c 2024-11-22T19:22:42,189 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:42,189 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-22T19:22:42,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:42,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:42,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
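The pid=56 failures in this stretch all follow one pattern: the master dispatches FlushRegionCallable to the region server, the region answers "NOT flushing ... as already flushing" because MemStoreFlusher is still writing out the previous snapshot, the callable fails with "Unable to complete flush", and the master logs the remote-procedure failure and re-dispatches it until the in-progress flush completes. Procedures like pid=55/56 are how a table flush is carried out on the master side; a client can request one through the Admin API, roughly as in this hedged sketch (the table name is reused from the log, everything else is assumed).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the cluster to flush every region of the table. The retries visible in the
          // log ("Checking to see if procedure is done pid=55", the repeated pid=56
          // dispatches) happen inside the cluster, not in client code.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }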
2024-11-22T19:22:42,190 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:42,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:42,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:42,201 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/A of 7de534ec4ea5964284edbc5ae1079040 into db02aca7584e461cb242539fc5835a1c(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:22:42,201 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:42,201 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/A, priority=12, startTime=1732303361243; duration=0sec 2024-11-22T19:22:42,201 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:42,201 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:A 2024-11-22T19:22:42,342 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:42,343 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-22T19:22:42,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
2024-11-22T19:22:42,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:42,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:42,343 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:42,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:42,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:42,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-22T19:22:42,394 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/cc94112ec4cd4ba2bdbbd7a986fb29f9 2024-11-22T19:22:42,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/58017801bf4443ec8f7f13f65d33aa55 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/58017801bf4443ec8f7f13f65d33aa55 2024-11-22T19:22:42,405 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/58017801bf4443ec8f7f13f65d33aa55, entries=200, sequenceid=315, filesize=39.0 K 2024-11-22T19:22:42,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/6b730175e94a4df79f6bb2e0ce4d6b00 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/6b730175e94a4df79f6bb2e0ce4d6b00 2024-11-22T19:22:42,412 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/6b730175e94a4df79f6bb2e0ce4d6b00, entries=150, sequenceid=315, filesize=12.0 K 2024-11-22T19:22:42,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/cc94112ec4cd4ba2bdbbd7a986fb29f9 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/cc94112ec4cd4ba2bdbbd7a986fb29f9 2024-11-22T19:22:42,418 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/cc94112ec4cd4ba2bdbbd7a986fb29f9, entries=150, sequenceid=315, filesize=12.0 K 2024-11-22T19:22:42,419 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=93.93 KB/96180 for 7de534ec4ea5964284edbc5ae1079040 in 1075ms, sequenceid=315, compaction requested=false 2024-11-22T19:22:42,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:42,496 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:42,498 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-22T19:22:42,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:42,498 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-22T19:22:42,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:42,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:42,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:42,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:42,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:42,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:42,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:42,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:42,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122763a6a42209949a5936d936a5ffb65a1_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303361390/Put/seqid=0 2024-11-22T19:22:42,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:42,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303422540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:42,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:42,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303422543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:42,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:42,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303422545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:42,554 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:42,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303422549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:42,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:42,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303422549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:42,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742061_1237 (size=12454) 2024-11-22T19:22:42,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:42,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303422651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:42,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:42,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303422652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:42,663 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:42,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303422660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:42,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:42,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303422661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:42,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:42,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303422660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:42,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:42,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303422857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:42,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:42,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303422857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:42,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:42,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303422866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:42,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:42,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303422869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:42,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:42,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303422870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:42,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:42,997 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122763a6a42209949a5936d936a5ffb65a1_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122763a6a42209949a5936d936a5ffb65a1_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:42,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/115ad87b126141669e859c7f880e0f27, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:42,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/115ad87b126141669e859c7f880e0f27 is 175, key is test_row_0/A:col10/1732303361390/Put/seqid=0 2024-11-22T19:22:43,018 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742062_1238 (size=31255) 2024-11-22T19:22:43,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:43,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:43,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303423163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:43,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303423162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:43,175 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:43,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303423175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:43,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:43,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303423175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:43,180 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:43,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303423175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:43,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-22T19:22:43,419 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=334, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/115ad87b126141669e859c7f880e0f27 2024-11-22T19:22:43,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/1ad4473a599b4614bdaa4432ef47ae5c is 50, key is test_row_0/B:col10/1732303361390/Put/seqid=0 2024-11-22T19:22:43,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742063_1239 (size=12301) 2024-11-22T19:22:43,473 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/1ad4473a599b4614bdaa4432ef47ae5c 2024-11-22T19:22:43,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/febc62383c594099a652d83adbb102e9 is 50, key is test_row_0/C:col10/1732303361390/Put/seqid=0 2024-11-22T19:22:43,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742064_1240 (size=12301) 2024-11-22T19:22:43,534 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=334 (bloomFilter=true), 
to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/febc62383c594099a652d83adbb102e9 2024-11-22T19:22:43,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/115ad87b126141669e859c7f880e0f27 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/115ad87b126141669e859c7f880e0f27 2024-11-22T19:22:43,548 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/115ad87b126141669e859c7f880e0f27, entries=150, sequenceid=334, filesize=30.5 K 2024-11-22T19:22:43,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/1ad4473a599b4614bdaa4432ef47ae5c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/1ad4473a599b4614bdaa4432ef47ae5c 2024-11-22T19:22:43,557 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/1ad4473a599b4614bdaa4432ef47ae5c, entries=150, sequenceid=334, filesize=12.0 K 2024-11-22T19:22:43,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/febc62383c594099a652d83adbb102e9 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/febc62383c594099a652d83adbb102e9 2024-11-22T19:22:43,566 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/febc62383c594099a652d83adbb102e9, entries=150, sequenceid=334, filesize=12.0 K 2024-11-22T19:22:43,568 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 7de534ec4ea5964284edbc5ae1079040 in 1070ms, sequenceid=334, compaction requested=true 2024-11-22T19:22:43,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 
7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:43,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:43,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-22T19:22:43,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-22T19:22:43,571 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-22T19:22:43,571 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3060 sec 2024-11-22T19:22:43,576 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 2.3130 sec 2024-11-22T19:22:43,670 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-22T19:22:43,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:43,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:43,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:43,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:43,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:43,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:43,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:43,692 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411223eeba8f47b1f4f678653cef66459c67b_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303362547/Put/seqid=0 2024-11-22T19:22:43,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:43,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303423701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:43,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:43,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303423702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:43,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:43,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303423702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:43,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:43,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303423703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:43,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:43,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303423703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:43,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742065_1241 (size=12454) 2024-11-22T19:22:43,746 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:43,752 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411223eeba8f47b1f4f678653cef66459c67b_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411223eeba8f47b1f4f678653cef66459c67b_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:43,753 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/b1c28a196c7e4c128b9f2c23cd152a32, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:43,754 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/b1c28a196c7e4c128b9f2c23cd152a32 is 175, key is test_row_0/A:col10/1732303362547/Put/seqid=0 2024-11-22T19:22:43,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 
is added to blk_1073742066_1242 (size=31255) 2024-11-22T19:22:43,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:43,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303423808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:43,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:43,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303423809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:43,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:43,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303423809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:43,815 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:43,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303423810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:43,815 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:43,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303423810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:44,019 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:44,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303424015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:44,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:44,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303424017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:44,020 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:44,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303424017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:44,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:44,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303424017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:44,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:44,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303424017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:44,185 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=354, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/b1c28a196c7e4c128b9f2c23cd152a32 2024-11-22T19:22:44,214 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/8f0406f57f554733aefa2fafb56abc27 is 50, key is test_row_0/B:col10/1732303362547/Put/seqid=0 2024-11-22T19:22:44,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742067_1243 (size=12301) 2024-11-22T19:22:44,254 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/8f0406f57f554733aefa2fafb56abc27 2024-11-22T19:22:44,278 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/7c8e94dd482f495da9c5e446983a6d48 is 50, key is test_row_0/C:col10/1732303362547/Put/seqid=0 2024-11-22T19:22:44,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742068_1244 (size=12301) 2024-11-22T19:22:44,321 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/7c8e94dd482f495da9c5e446983a6d48 2024-11-22T19:22:44,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:44,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303424322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:44,328 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:44,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303424322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:44,328 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:44,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303424323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:44,329 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:44,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303424323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:44,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:44,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303424323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:44,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/b1c28a196c7e4c128b9f2c23cd152a32 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/b1c28a196c7e4c128b9f2c23cd152a32 2024-11-22T19:22:44,358 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/b1c28a196c7e4c128b9f2c23cd152a32, entries=150, sequenceid=354, filesize=30.5 K 2024-11-22T19:22:44,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/8f0406f57f554733aefa2fafb56abc27 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/8f0406f57f554733aefa2fafb56abc27 2024-11-22T19:22:44,364 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/8f0406f57f554733aefa2fafb56abc27, entries=150, sequenceid=354, filesize=12.0 K 2024-11-22T19:22:44,365 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/7c8e94dd482f495da9c5e446983a6d48 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/7c8e94dd482f495da9c5e446983a6d48 2024-11-22T19:22:44,371 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/7c8e94dd482f495da9c5e446983a6d48, entries=150, sequenceid=354, filesize=12.0 K 2024-11-22T19:22:44,374 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 
KB/96180 for 7de534ec4ea5964284edbc5ae1079040 in 703ms, sequenceid=354, compaction requested=true 2024-11-22T19:22:44,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:44,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:44,374 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:22:44,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:44,375 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:22:44,377 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:22:44,377 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/B is initiating minor compaction (all files) 2024-11-22T19:22:44,377 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/B in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:44,378 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/4375a22ea1104a6bba1d139278de201a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/6b730175e94a4df79f6bb2e0ce4d6b00, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/1ad4473a599b4614bdaa4432ef47ae5c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/8f0406f57f554733aefa2fafb56abc27] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=48.7 K 2024-11-22T19:22:44,378 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 134362 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:22:44,378 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/A is initiating minor compaction (all files) 2024-11-22T19:22:44,378 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/A in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
2024-11-22T19:22:44,378 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/db02aca7584e461cb242539fc5835a1c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/58017801bf4443ec8f7f13f65d33aa55, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/115ad87b126141669e859c7f880e0f27, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/b1c28a196c7e4c128b9f2c23cd152a32] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=131.2 K 2024-11-22T19:22:44,378 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:44,378 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. files: [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/db02aca7584e461cb242539fc5835a1c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/58017801bf4443ec8f7f13f65d33aa55, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/115ad87b126141669e859c7f880e0f27, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/b1c28a196c7e4c128b9f2c23cd152a32] 2024-11-22T19:22:44,379 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 4375a22ea1104a6bba1d139278de201a, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732303359868 2024-11-22T19:22:44,379 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting db02aca7584e461cb242539fc5835a1c, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732303359868 2024-11-22T19:22:44,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:44,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:44,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:C, priority=-2147483648, current under 
compaction store size is 3 2024-11-22T19:22:44,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:44,380 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b730175e94a4df79f6bb2e0ce4d6b00, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732303361026 2024-11-22T19:22:44,381 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 58017801bf4443ec8f7f13f65d33aa55, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732303361026 2024-11-22T19:22:44,382 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ad4473a599b4614bdaa4432ef47ae5c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732303361386 2024-11-22T19:22:44,382 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 115ad87b126141669e859c7f880e0f27, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732303361386 2024-11-22T19:22:44,383 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f0406f57f554733aefa2fafb56abc27, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732303362532 2024-11-22T19:22:44,384 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b1c28a196c7e4c128b9f2c23cd152a32, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732303362532 2024-11-22T19:22:44,398 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#B#compaction#204 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:44,399 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/858c01ea420d4deaa55c762995c4b995 is 50, key is test_row_0/B:col10/1732303362547/Put/seqid=0 2024-11-22T19:22:44,414 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:44,450 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122407475636efe4053bdbd6b37c015bdf4_7de534ec4ea5964284edbc5ae1079040 store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:44,452 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122407475636efe4053bdbd6b37c015bdf4_7de534ec4ea5964284edbc5ae1079040, store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:44,453 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122407475636efe4053bdbd6b37c015bdf4_7de534ec4ea5964284edbc5ae1079040 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:44,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742069_1245 (size=13085) 2024-11-22T19:22:44,476 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/858c01ea420d4deaa55c762995c4b995 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/858c01ea420d4deaa55c762995c4b995 2024-11-22T19:22:44,483 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/B of 7de534ec4ea5964284edbc5ae1079040 into 858c01ea420d4deaa55c762995c4b995(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
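Family A is handled by DefaultMobStoreCompactor, and its MOB writer is aborted above because none of the cells being compacted are large enough to be stored as MOB. A minimal sketch of how such a MOB-enabled family is declared, assuming a fresh table of the same name and a threshold chosen purely for illustration:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      // Cells larger than the MOB threshold are written to separate MOB files; smaller
      // cells stay in ordinary store files, which is why the compactor above aborts its
      // MOB writer with "there are no MOB cells".
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(100 * 1024L) // illustrative threshold, not taken from the test
          .build());
      admin.createTable(table.build());
    }
  }
}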
2024-11-22T19:22:44,483 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:44,483 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/B, priority=12, startTime=1732303364375; duration=0sec 2024-11-22T19:22:44,483 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:44,483 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:B 2024-11-22T19:22:44,483 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:22:44,486 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:22:44,486 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/C is initiating minor compaction (all files) 2024-11-22T19:22:44,486 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/C in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:44,486 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/e6dc0c95e02449549ba29b1e522d4bdf, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/cc94112ec4cd4ba2bdbbd7a986fb29f9, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/febc62383c594099a652d83adbb102e9, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/7c8e94dd482f495da9c5e446983a6d48] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=48.7 K 2024-11-22T19:22:44,487 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting e6dc0c95e02449549ba29b1e522d4bdf, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732303359868 2024-11-22T19:22:44,487 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting cc94112ec4cd4ba2bdbbd7a986fb29f9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732303361026 2024-11-22T19:22:44,488 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting febc62383c594099a652d83adbb102e9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=334, earliestPutTs=1732303361386 2024-11-22T19:22:44,488 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c8e94dd482f495da9c5e446983a6d48, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732303362532 2024-11-22T19:22:44,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742070_1246 (size=4469) 2024-11-22T19:22:44,497 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#A#compaction#205 average throughput is 0.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:44,498 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/2aaad0527624401489eceb4016d124ac is 175, key is test_row_0/A:col10/1732303362547/Put/seqid=0 2024-11-22T19:22:44,509 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#C#compaction#206 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:44,510 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/ab5a7b4963dd43038d2547ab39b6bcc2 is 50, key is test_row_0/C:col10/1732303362547/Put/seqid=0 2024-11-22T19:22:44,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742071_1247 (size=32039) 2024-11-22T19:22:44,568 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/2aaad0527624401489eceb4016d124ac as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/2aaad0527624401489eceb4016d124ac 2024-11-22T19:22:44,574 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/A of 7de534ec4ea5964284edbc5ae1079040 into 2aaad0527624401489eceb4016d124ac(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
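The repeated "Over memstore limit=512.0 K" rejections in this log come from HRegion.checkResources(): once a region's memstore grows past hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, new mutations are refused with RegionTooBusyException until flushing catches up. A minimal configuration sketch follows, with values assumed only so that they reproduce the 512 KB figure (for example a 128 KB flush size with the default multiplier of 4); the test's real settings are not shown in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Blocking limit = flush.size * block.multiplier. 128 KB * 4 = 512 KB, which matches
    // the "Over memstore limit=512.0 K" messages above. Both values are assumptions.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Region blocks new writes above " + blockingLimit + " bytes");
  }
}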
2024-11-22T19:22:44,574 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:44,574 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/A, priority=12, startTime=1732303364374; duration=0sec 2024-11-22T19:22:44,574 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:44,574 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:A 2024-11-22T19:22:44,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742072_1248 (size=13085) 2024-11-22T19:22:44,835 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-22T19:22:44,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:44,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:44,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:44,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:44,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:44,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:44,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:44,867 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122cc487af7eb6646e9981150611c4ce6b9_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303363701/Put/seqid=0 2024-11-22T19:22:44,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742073_1249 (size=17534) 2024-11-22T19:22:44,907 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:44,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:44,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303424902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:44,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303424903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:44,907 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:44,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303424904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:44,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:44,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303424907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:44,911 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:44,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303424908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:44,990 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/ab5a7b4963dd43038d2547ab39b6bcc2 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/ab5a7b4963dd43038d2547ab39b6bcc2 2024-11-22T19:22:44,998 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/C of 7de534ec4ea5964284edbc5ae1079040 into ab5a7b4963dd43038d2547ab39b6bcc2(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:22:44,998 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:44,998 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/C, priority=12, startTime=1732303364379; duration=0sec 2024-11-22T19:22:44,998 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:44,998 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:C 2024-11-22T19:22:45,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303425010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303425010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303425010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,014 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303425013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,015 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303425013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303425213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,218 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303425215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,219 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303425216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,220 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303425217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,220 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303425217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,286 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:45,295 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122cc487af7eb6646e9981150611c4ce6b9_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122cc487af7eb6646e9981150611c4ce6b9_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:45,303 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/4e1c0195e7b24a0695aae0a65deb1798, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:45,303 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/4e1c0195e7b24a0695aae0a65deb1798 is 175, key is test_row_0/A:col10/1732303363701/Put/seqid=0 2024-11-22T19:22:45,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742074_1250 (size=48639) 2024-11-22T19:22:45,342 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=375, 
memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/4e1c0195e7b24a0695aae0a65deb1798 2024-11-22T19:22:45,366 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/c9dac5bfeb974a60bb2f3d1087695bac is 50, key is test_row_0/B:col10/1732303363701/Put/seqid=0 2024-11-22T19:22:45,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-22T19:22:45,368 INFO [Thread-740 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-22T19:22:45,370 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:22:45,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-22T19:22:45,372 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:22:45,373 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:22:45,373 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:22:45,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-22T19:22:45,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742075_1251 (size=12301) 2024-11-22T19:22:45,389 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/c9dac5bfeb974a60bb2f3d1087695bac 2024-11-22T19:22:45,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/e62c728c364c4854a4db0d0d661660ae is 50, key is test_row_0/C:col10/1732303363701/Put/seqid=0 2024-11-22T19:22:45,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742076_1252 (size=12301) 2024-11-22T19:22:45,432 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=375 (bloomFilter=true), 
to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/e62c728c364c4854a4db0d0d661660ae 2024-11-22T19:22:45,441 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/4e1c0195e7b24a0695aae0a65deb1798 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/4e1c0195e7b24a0695aae0a65deb1798 2024-11-22T19:22:45,448 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/4e1c0195e7b24a0695aae0a65deb1798, entries=250, sequenceid=375, filesize=47.5 K 2024-11-22T19:22:45,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/c9dac5bfeb974a60bb2f3d1087695bac as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/c9dac5bfeb974a60bb2f3d1087695bac 2024-11-22T19:22:45,456 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/c9dac5bfeb974a60bb2f3d1087695bac, entries=150, sequenceid=375, filesize=12.0 K 2024-11-22T19:22:45,457 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/e62c728c364c4854a4db0d0d661660ae as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/e62c728c364c4854a4db0d0d661660ae 2024-11-22T19:22:45,464 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/e62c728c364c4854a4db0d0d661660ae, entries=150, sequenceid=375, filesize=12.0 K 2024-11-22T19:22:45,466 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 7de534ec4ea5964284edbc5ae1079040 in 630ms, sequenceid=375, compaction requested=false 2024-11-22T19:22:45,466 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:45,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-22T19:22:45,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:45,521 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=107.34 KB heapSize=282 KB 
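The Mutate calls rejected above with RegionTooBusyException are normally retried transparently by the HBase client with backoff; the loop below only makes that behaviour explicit as a simplified sketch. It reuses the table, row and family names from this log, and a real client may surface the busy condition wrapped in RetriesExhaustedWithDetailsException rather than as a bare RegionTooBusyException, so this is an illustration, not the client's actual retry implementation.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // accepted once the memstore has been flushed back under the limit
        } catch (RegionTooBusyException busy) {
          // The region is applying backpressure; wait before trying again.
          Thread.sleep(100L * attempt);
        }
      }
    }
  }
}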
2024-11-22T19:22:45,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:45,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:45,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:45,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:45,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:45,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:45,526 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:45,526 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-22T19:22:45,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:45,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:45,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:45,527 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:45,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:45,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:45,539 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112210976dec443a49dcafda42d6c10cf834_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303364902/Put/seqid=0 2024-11-22T19:22:45,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303425542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303425545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303425545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303425545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303425546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742077_1253 (size=14994) 2024-11-22T19:22:45,575 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:45,587 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112210976dec443a49dcafda42d6c10cf834_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112210976dec443a49dcafda42d6c10cf834_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:45,590 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/db2d64d35c1b4923a79a6dd3491c7488, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:45,590 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/db2d64d35c1b4923a79a6dd3491c7488 is 175, key is test_row_0/A:col10/1732303364902/Put/seqid=0 2024-11-22T19:22:45,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742078_1254 (size=39949) 2024-11-22T19:22:45,643 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=395, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/db2d64d35c1b4923a79a6dd3491c7488 2024-11-22T19:22:45,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303425647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,654 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,654 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/49e3ec1f7bf647a7b8d67a8b582f9689 is 50, key is test_row_0/B:col10/1732303364902/Put/seqid=0 2024-11-22T19:22:45,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303425652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,654 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303425652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303425652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303425653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-22T19:22:45,681 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:45,682 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-22T19:22:45,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:45,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:45,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:45,682 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:45,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:45,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:45,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742079_1255 (size=12301) 2024-11-22T19:22:45,704 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/49e3ec1f7bf647a7b8d67a8b582f9689 2024-11-22T19:22:45,726 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/9124c9f809f047f49b6346df80318275 is 50, key is test_row_0/C:col10/1732303364902/Put/seqid=0 2024-11-22T19:22:45,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742080_1256 (size=12301) 2024-11-22T19:22:45,834 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:45,834 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-22T19:22:45,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:45,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:45,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:45,835 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:45,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:45,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:45,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303425850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303425856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303425856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303425856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:45,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303425858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:45,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-22T19:22:45,987 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:45,988 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-22T19:22:45,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:45,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:45,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:45,988 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:45,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:45,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:46,141 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:46,142 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-22T19:22:46,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:46,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:46,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:46,142 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:46,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:46,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:46,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:46,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303426152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:46,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:46,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303426160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:46,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:46,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303426160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:46,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:46,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303426162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:46,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:46,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303426163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:46,180 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/9124c9f809f047f49b6346df80318275 2024-11-22T19:22:46,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/db2d64d35c1b4923a79a6dd3491c7488 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/db2d64d35c1b4923a79a6dd3491c7488 2024-11-22T19:22:46,197 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/db2d64d35c1b4923a79a6dd3491c7488, entries=200, sequenceid=395, filesize=39.0 K 2024-11-22T19:22:46,198 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/49e3ec1f7bf647a7b8d67a8b582f9689 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/49e3ec1f7bf647a7b8d67a8b582f9689 2024-11-22T19:22:46,208 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/49e3ec1f7bf647a7b8d67a8b582f9689, entries=150, sequenceid=395, filesize=12.0 K 2024-11-22T19:22:46,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/9124c9f809f047f49b6346df80318275 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/9124c9f809f047f49b6346df80318275 2024-11-22T19:22:46,216 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/9124c9f809f047f49b6346df80318275, entries=150, sequenceid=395, filesize=12.0 K 2024-11-22T19:22:46,217 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 7de534ec4ea5964284edbc5ae1079040 in 696ms, sequenceid=395, compaction requested=true 2024-11-22T19:22:46,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:46,217 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:46,219 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 120627 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:46,219 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/A is initiating minor compaction (all files) 2024-11-22T19:22:46,219 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/A in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
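The compaction selection logged just above (SortedCompactionPolicy choosing from 3 store files, then "Exploring compaction algorithm has selected 3 files of size 120627 ... with 1 in ratio") boils down to a ratio test over candidate file sizes. The Java sketch below is a simplified rendition of that idea, under the assumption that a candidate set is "in ratio" when no single file is larger than the configured ratio times the combined size of the other files; it is not a copy of the HBase implementation, and 1.2 is simply the documented default of hbase.hstore.compaction.ratio.

```java
// Simplified sketch of the "in ratio" test used when picking compaction candidates.
import java.util.List;

public class InRatioCheck {
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    if (fileSizes.size() < 2) {
      return true; // a single file trivially satisfies the constraint
    }
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      // Reject the set if one file dwarfs the rest of the selection.
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Sizes chosen to sum to the 120627 bytes reported above; they match the
    // per-file A-store sizes (31.3 K, 47.5 K, 39.0 K) logged for this compaction.
    List<Long> sizes = List.of(32_051L, 48_640L, 39_936L);
    System.out.println(filesInRatio(sizes, 1.2)); // prints true, matching "1 in ratio"
  }
}
```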
2024-11-22T19:22:46,219 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/2aaad0527624401489eceb4016d124ac, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/4e1c0195e7b24a0695aae0a65deb1798, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/db2d64d35c1b4923a79a6dd3491c7488] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=117.8 K 2024-11-22T19:22:46,219 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:46,219 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. files: [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/2aaad0527624401489eceb4016d124ac, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/4e1c0195e7b24a0695aae0a65deb1798, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/db2d64d35c1b4923a79a6dd3491c7488] 2024-11-22T19:22:46,221 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2aaad0527624401489eceb4016d124ac, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732303362532 2024-11-22T19:22:46,222 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e1c0195e7b24a0695aae0a65deb1798, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1732303363685 2024-11-22T19:22:46,222 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting db2d64d35c1b4923a79a6dd3491c7488, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732303364902 2024-11-22T19:22:46,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:46,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:46,230 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:46,231 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:46,231 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/B is initiating minor compaction (all files) 2024-11-22T19:22:46,231 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/B in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:46,232 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/858c01ea420d4deaa55c762995c4b995, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/c9dac5bfeb974a60bb2f3d1087695bac, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/49e3ec1f7bf647a7b8d67a8b582f9689] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=36.8 K 2024-11-22T19:22:46,232 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 858c01ea420d4deaa55c762995c4b995, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732303362532 2024-11-22T19:22:46,233 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting c9dac5bfeb974a60bb2f3d1087695bac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1732303363685 2024-11-22T19:22:46,233 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 49e3ec1f7bf647a7b8d67a8b582f9689, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732303364902 2024-11-22T19:22:46,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:46,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:46,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:22:46,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:46,245 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:46,248 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
7de534ec4ea5964284edbc5ae1079040#B#compaction#214 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:46,248 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/aa07623f9497400982aba428fd78463f is 50, key is test_row_0/B:col10/1732303364902/Put/seqid=0 2024-11-22T19:22:46,262 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122be69f14ab3ad4d0b8cbf3ffe34703100_7de534ec4ea5964284edbc5ae1079040 store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:46,264 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122be69f14ab3ad4d0b8cbf3ffe34703100_7de534ec4ea5964284edbc5ae1079040, store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:46,264 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122be69f14ab3ad4d0b8cbf3ffe34703100_7de534ec4ea5964284edbc5ae1079040 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:46,296 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:46,297 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-22T19:22:46,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
2024-11-22T19:22:46,298 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-22T19:22:46,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:46,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:46,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:46,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:46,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:46,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:46,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742081_1257 (size=13187) 2024-11-22T19:22:46,311 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/aa07623f9497400982aba428fd78463f as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/aa07623f9497400982aba428fd78463f 2024-11-22T19:22:46,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742082_1258 (size=4469) 2024-11-22T19:22:46,321 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#A#compaction#213 average throughput is 0.33 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:46,321 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/e8ff260e28bd4121a2a05c0182a092ec is 175, key is test_row_0/A:col10/1732303364902/Put/seqid=0 2024-11-22T19:22:46,323 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/B of 7de534ec4ea5964284edbc5ae1079040 into aa07623f9497400982aba428fd78463f(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:46,323 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:46,323 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/B, priority=13, startTime=1732303366230; duration=0sec 2024-11-22T19:22:46,323 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:46,323 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:B 2024-11-22T19:22:46,323 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:46,326 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:46,326 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/C is initiating minor compaction (all files) 2024-11-22T19:22:46,326 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/C in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:46,326 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/ab5a7b4963dd43038d2547ab39b6bcc2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/e62c728c364c4854a4db0d0d661660ae, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/9124c9f809f047f49b6346df80318275] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=36.8 K 2024-11-22T19:22:46,328 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting ab5a7b4963dd43038d2547ab39b6bcc2, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732303362532 2024-11-22T19:22:46,329 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting e62c728c364c4854a4db0d0d661660ae, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1732303363685 2024-11-22T19:22:46,329 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 9124c9f809f047f49b6346df80318275, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732303364902 2024-11-22T19:22:46,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112214fc1782112a4584919dddd3a24619fe_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303365538/Put/seqid=0 2024-11-22T19:22:46,354 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#C#compaction#216 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:46,355 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/6aa3d29249e24f50a42f3e1b6d21440a is 50, key is test_row_0/C:col10/1732303364902/Put/seqid=0 2024-11-22T19:22:46,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742083_1259 (size=32141) 2024-11-22T19:22:46,375 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/e8ff260e28bd4121a2a05c0182a092ec as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/e8ff260e28bd4121a2a05c0182a092ec 2024-11-22T19:22:46,381 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/A of 7de534ec4ea5964284edbc5ae1079040 into e8ff260e28bd4121a2a05c0182a092ec(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
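The DefaultMobStoreFlusher and DefaultMobStoreCompactor entries above, including the MOB writer that is created and then aborted "because there are no MOB cells", show family A going through the MOB code path with cells that stay below the MOB threshold. As a hedged illustration of how such a family is declared (not taken from the test itself; the 100 KB threshold is a hypothetical value), a descriptor could be built like this:

```java
// Illustrative sketch: a MOB-enabled column family on the table named in the log.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static void main(String[] args) {
    ColumnFamilyDescriptor familyA = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)           // route oversized cells through the MOB store
        .setMobThreshold(100 * 1024L)  // cells above this size are stored as MOB (hypothetical)
        .build();

    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(familyA)
        .build();

    System.out.println(table);
  }
}
```

Cells under the threshold are written as ordinary store file cells, which is why the compactor above ends up discarding its freshly created MOB writer.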
2024-11-22T19:22:46,381 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:46,381 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/A, priority=13, startTime=1732303366217; duration=0sec 2024-11-22T19:22:46,382 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:46,382 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:A 2024-11-22T19:22:46,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742084_1260 (size=12454) 2024-11-22T19:22:46,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742085_1261 (size=13187) 2024-11-22T19:22:46,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:46,409 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112214fc1782112a4584919dddd3a24619fe_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112214fc1782112a4584919dddd3a24619fe_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:46,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/b08dedcfb6de40cf948788c520bb4a3b, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:46,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/b08dedcfb6de40cf948788c520bb4a3b is 175, key is test_row_0/A:col10/1732303365538/Put/seqid=0 2024-11-22T19:22:46,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-22T19:22:46,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742086_1262 (size=31255) 2024-11-22T19:22:46,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 
7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:46,661 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:46,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:46,690 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:46,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303426687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:46,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303426688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:46,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:46,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303426689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:46,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:46,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303426690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:46,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:46,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303426690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:46,794 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:46,795 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:46,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303426792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:46,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303426792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:46,795 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:46,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303426792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:46,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:46,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303426793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:46,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:46,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303426795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:46,806 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/6aa3d29249e24f50a42f3e1b6d21440a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/6aa3d29249e24f50a42f3e1b6d21440a 2024-11-22T19:22:46,814 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/C of 7de534ec4ea5964284edbc5ae1079040 into 6aa3d29249e24f50a42f3e1b6d21440a(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
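Note on the repeated RegionTooBusyException entries above: HRegion.checkResources rejects writes once the region's memstore exceeds its blocking size, which HBase derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier and reports here as "Over memstore limit=512.0 K". The Java sketch below only illustrates that relationship; the 128 KB flush size is an assumed test setting picked so that 128 KB * 4 matches the 512 K limit printed in this log, not a value read from the test configuration.

    // Hedged sketch, not HRegion's own code: how the 512 K blocking limit in the
    // traces above is commonly derived from real HBase settings. The 128 KB flush
    // size is an assumption; the multiplier of 4 is the usual HBase default.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreBlockingLimit {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumed test value, in bytes
            conf.setLong("hbase.hregion.memstore.block.multiplier", 4);    // usual default
            long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = flushSize * multiplier;                   // 512 KB with the values above
            System.out.println("Writes are rejected with RegionTooBusyException above "
                + blockingLimit + " bytes of memstore per region");
        }
    }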
2024-11-22T19:22:46,814 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:46,814 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/C, priority=13, startTime=1732303366234; duration=0sec 2024-11-22T19:22:46,814 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:46,814 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:C 2024-11-22T19:22:46,874 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=413, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/b08dedcfb6de40cf948788c520bb4a3b 2024-11-22T19:22:46,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/925bff0b45a046e2ad818c8a50ab425a is 50, key is test_row_0/B:col10/1732303365538/Put/seqid=0 2024-11-22T19:22:46,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742087_1263 (size=12301) 2024-11-22T19:22:46,923 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/925bff0b45a046e2ad818c8a50ab425a 2024-11-22T19:22:46,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/ebd8ba035c78422f808a6677f7df90cc is 50, key is test_row_0/C:col10/1732303365538/Put/seqid=0 2024-11-22T19:22:46,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742088_1264 (size=12301) 2024-11-22T19:22:46,977 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/ebd8ba035c78422f808a6677f7df90cc 2024-11-22T19:22:46,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/b08dedcfb6de40cf948788c520bb4a3b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/b08dedcfb6de40cf948788c520bb4a3b 2024-11-22T19:22:46,996 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/b08dedcfb6de40cf948788c520bb4a3b, entries=150, sequenceid=413, filesize=30.5 K 2024-11-22T19:22:46,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/925bff0b45a046e2ad818c8a50ab425a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/925bff0b45a046e2ad818c8a50ab425a 2024-11-22T19:22:47,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303426997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303426998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,005 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303426998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303427001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303426997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,006 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/925bff0b45a046e2ad818c8a50ab425a, entries=150, sequenceid=413, filesize=12.0 K 2024-11-22T19:22:47,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/ebd8ba035c78422f808a6677f7df90cc as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/ebd8ba035c78422f808a6677f7df90cc 2024-11-22T19:22:47,012 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/ebd8ba035c78422f808a6677f7df90cc, entries=150, sequenceid=413, filesize=12.0 K 2024-11-22T19:22:47,014 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 7de534ec4ea5964284edbc5ae1079040 in 716ms, sequenceid=413, compaction requested=false 2024-11-22T19:22:47,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:47,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
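The Mutate calls rejected above keep receiving RegionTooBusyException until flushes like the one just logged ("Finished flush of dataSize ~100.63 KB ... in 716ms") bring the memstore back under the limit. A minimal client-side sketch of handling that response is below. It is an illustration, not the test's own writer code: it assumes RegionTooBusyException reaches the caller directly (with client retries enabled it may instead arrive wrapped in a retries-exhausted exception); the table name, row, and column family follow this log, and the backoff values are made up.

    // Hedged sketch: retrying a put that the server answers with RegionTooBusyException.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetryExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100; // illustrative starting backoff
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);   // server may reject this while the memstore is over the limit
                        break;            // write accepted
                    } catch (RegionTooBusyException e) {
                        if (attempt >= 10) throw e;              // bounded number of tries
                        Thread.sleep(backoffMs);                 // give the flush time to drain the memstore
                        backoffMs = Math.min(backoffMs * 2, 5_000);
                    }
                }
            }
        }
    }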
2024-11-22T19:22:47,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-11-22T19:22:47,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-11-22T19:22:47,017 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-11-22T19:22:47,017 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6420 sec 2024-11-22T19:22:47,019 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 1.6480 sec 2024-11-22T19:22:47,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:47,308 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-22T19:22:47,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:47,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:47,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:47,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:47,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:47,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:47,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303427324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,329 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303427326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,329 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303427327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303427329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303427329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,336 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122fa72cac49e0d404bace59c617f3c1c4a_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303367306/Put/seqid=0 2024-11-22T19:22:47,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742089_1265 (size=17534) 2024-11-22T19:22:47,359 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:47,366 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122fa72cac49e0d404bace59c617f3c1c4a_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122fa72cac49e0d404bace59c617f3c1c4a_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:47,368 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/4040bbfc24904de58a364d514b653cb9, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:47,369 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/4040bbfc24904de58a364d514b653cb9 is 175, key is test_row_0/A:col10/1732303367306/Put/seqid=0 2024-11-22T19:22:47,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742090_1266 (size=48639) 2024-11-22T19:22:47,389 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=437, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/4040bbfc24904de58a364d514b653cb9 2024-11-22T19:22:47,420 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/359e972e7ac44001acba7b771b99e55b is 50, key is test_row_0/B:col10/1732303367306/Put/seqid=0 2024-11-22T19:22:47,431 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303427430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303427433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303427433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303427433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303427433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742091_1267 (size=12301) 2024-11-22T19:22:47,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-22T19:22:47,478 INFO [Thread-740 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-22T19:22:47,479 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:22:47,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-11-22T19:22:47,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-22T19:22:47,485 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:22:47,486 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:22:47,486 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:22:47,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-22T19:22:47,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303427632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,641 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:47,641 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-22T19:22:47,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:47,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303427638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303427638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:47,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:47,642 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:47,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:47,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303427638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303427639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:47,814 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:47,815 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-22T19:22:47,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:47,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:47,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:47,816 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:47,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:47,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-22T19:22:47,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:47,844 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/359e972e7ac44001acba7b771b99e55b 2024-11-22T19:22:47,882 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/0acbc5ae0aa248f8bc78863873b4e59f is 50, key is test_row_0/C:col10/1732303367306/Put/seqid=0 2024-11-22T19:22:47,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742092_1268 (size=12301) 2024-11-22T19:22:47,918 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/0acbc5ae0aa248f8bc78863873b4e59f 2024-11-22T19:22:47,927 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/4040bbfc24904de58a364d514b653cb9 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/4040bbfc24904de58a364d514b653cb9 2024-11-22T19:22:47,938 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/4040bbfc24904de58a364d514b653cb9, entries=250, sequenceid=437, filesize=47.5 K 2024-11-22T19:22:47,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/359e972e7ac44001acba7b771b99e55b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/359e972e7ac44001acba7b771b99e55b 2024-11-22T19:22:47,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303427936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,945 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/359e972e7ac44001acba7b771b99e55b, entries=150, sequenceid=437, filesize=12.0 K 2024-11-22T19:22:47,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303427943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303427944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303427946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:47,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303427946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:47,949 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/0acbc5ae0aa248f8bc78863873b4e59f as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0acbc5ae0aa248f8bc78863873b4e59f 2024-11-22T19:22:47,956 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0acbc5ae0aa248f8bc78863873b4e59f, entries=150, sequenceid=437, filesize=12.0 K 2024-11-22T19:22:47,957 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 7de534ec4ea5964284edbc5ae1079040 in 649ms, sequenceid=437, compaction requested=true 2024-11-22T19:22:47,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:47,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:47,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:47,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:47,957 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:47,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:47,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:22:47,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction 
requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T19:22:47,957 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:47,958 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 112035 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:47,958 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/A is initiating minor compaction (all files) 2024-11-22T19:22:47,958 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/A in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:47,959 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/e8ff260e28bd4121a2a05c0182a092ec, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/b08dedcfb6de40cf948788c520bb4a3b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/4040bbfc24904de58a364d514b653cb9] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=109.4 K 2024-11-22T19:22:47,959 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:47,959 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
files: [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/e8ff260e28bd4121a2a05c0182a092ec, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/b08dedcfb6de40cf948788c520bb4a3b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/4040bbfc24904de58a364d514b653cb9] 2024-11-22T19:22:47,959 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:47,959 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/B is initiating minor compaction (all files) 2024-11-22T19:22:47,959 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/B in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:47,959 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/aa07623f9497400982aba428fd78463f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/925bff0b45a046e2ad818c8a50ab425a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/359e972e7ac44001acba7b771b99e55b] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=36.9 K 2024-11-22T19:22:47,959 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8ff260e28bd4121a2a05c0182a092ec, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732303364902 2024-11-22T19:22:47,963 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting aa07623f9497400982aba428fd78463f, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732303364902 2024-11-22T19:22:47,964 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b08dedcfb6de40cf948788c520bb4a3b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732303365538 2024-11-22T19:22:47,964 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 925bff0b45a046e2ad818c8a50ab425a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732303365538 2024-11-22T19:22:47,964 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4040bbfc24904de58a364d514b653cb9, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1732303366675 2024-11-22T19:22:47,964 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 
359e972e7ac44001acba7b771b99e55b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1732303366688 2024-11-22T19:22:47,969 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:47,970 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-22T19:22:47,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:47,970 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-22T19:22:47,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:47,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:47,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:47,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:47,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:47,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:47,976 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:47,977 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#B#compaction#222 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:47,978 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/bb80f219fd5641479f7c5e3b973c8b59 is 50, key is test_row_0/B:col10/1732303367306/Put/seqid=0 2024-11-22T19:22:48,013 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122ea06c41179a0414b8f9759a2638bf334_7de534ec4ea5964284edbc5ae1079040 store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:48,015 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122ea06c41179a0414b8f9759a2638bf334_7de534ec4ea5964284edbc5ae1079040, store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:48,015 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122ea06c41179a0414b8f9759a2638bf334_7de534ec4ea5964284edbc5ae1079040 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:48,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411222cbd4595f2b14f788186eda64e0d9618_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303367326/Put/seqid=0 2024-11-22T19:22:48,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742093_1269 (size=13289) 2024-11-22T19:22:48,078 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/bb80f219fd5641479f7c5e3b973c8b59 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/bb80f219fd5641479f7c5e3b973c8b59 2024-11-22T19:22:48,086 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/B of 7de534ec4ea5964284edbc5ae1079040 into bb80f219fd5641479f7c5e3b973c8b59(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
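For context on the repeated RegionTooBusyException entries above: HRegion.checkResources() rejects new writes once the region's memstore exceeds its blocking size, which HBase derives as hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the unusually small 512.0 K limit in this run presumably comes from the test lowering the flush size. The following is a minimal, hypothetical sketch (not part of TestAcidGuarantees) showing how those two settings combine; the concrete values are illustrative only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreBlockingLimitExample {
        public static void main(String[] args) {
            // Illustrative values only; the test run above configures its own.
            Configuration conf = HBaseConfiguration.create();
            long flushSize = 128L * 1024L;   // hbase.hregion.memstore.flush.size, in bytes
            int blockMultiplier = 4;         // hbase.hregion.memstore.block.multiplier
            conf.setLong("hbase.hregion.memstore.flush.size", flushSize);
            conf.setInt("hbase.hregion.memstore.block.multiplier", blockMultiplier);

            // A region blocks further writes (RegionTooBusyException) once its memstore
            // exceeds flushSize * blockMultiplier; with these values that is 512 KB,
            // matching the "Over memstore limit=512.0 K" entries in this log.
            long blockingLimit = flushSize * blockMultiplier;
            System.out.println("Blocking memstore limit = " + blockingLimit + " bytes");
        }
    }
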
2024-11-22T19:22:48,086 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:48,086 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/B, priority=13, startTime=1732303367957; duration=0sec 2024-11-22T19:22:48,086 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:48,088 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:B 2024-11-22T19:22:48,088 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:48,090 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:48,090 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/C is initiating minor compaction (all files) 2024-11-22T19:22:48,090 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/C in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:48,090 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/6aa3d29249e24f50a42f3e1b6d21440a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/ebd8ba035c78422f808a6677f7df90cc, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0acbc5ae0aa248f8bc78863873b4e59f] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=36.9 K 2024-11-22T19:22:48,090 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 6aa3d29249e24f50a42f3e1b6d21440a, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732303364902 2024-11-22T19:22:48,091 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting ebd8ba035c78422f808a6677f7df90cc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732303365538 2024-11-22T19:22:48,091 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 0acbc5ae0aa248f8bc78863873b4e59f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1732303366688 2024-11-22T19:22:48,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 
is added to blk_1073742094_1270 (size=4469) 2024-11-22T19:22:48,110 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#A#compaction#223 average throughput is 0.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:48,111 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/3a41bb3b077f479292cfdf3e5f4d0a4d is 175, key is test_row_0/A:col10/1732303367306/Put/seqid=0 2024-11-22T19:22:48,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-22T19:22:48,127 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#C#compaction#225 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:48,127 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/aa5d342bfe274cd39bd6ea9292a7808f is 50, key is test_row_0/C:col10/1732303367306/Put/seqid=0 2024-11-22T19:22:48,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742095_1271 (size=12454) 2024-11-22T19:22:48,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742096_1272 (size=32243) 2024-11-22T19:22:48,176 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/3a41bb3b077f479292cfdf3e5f4d0a4d as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/3a41bb3b077f479292cfdf3e5f4d0a4d 2024-11-22T19:22:48,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742097_1273 (size=13289) 2024-11-22T19:22:48,194 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/A of 7de534ec4ea5964284edbc5ae1079040 into 3a41bb3b077f479292cfdf3e5f4d0a4d(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
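For context on the rejected Mutate calls above: RegionTooBusyException is a retryable IOException, and the stock HBase client normally retries it internally with backoff. The sketch below is a hypothetical, explicit retry loop around a single Put, assuming the exception reaches the caller; the table, row, family, and qualifier names are taken from this log, while the attempt count and backoff values are invented for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionTooBusyRetryExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

                int maxAttempts = 5;    // illustrative
                long backoffMs = 100L;  // illustrative starting backoff
                for (int attempt = 1; attempt <= maxAttempts; attempt++) {
                    try {
                        // Fails with RegionTooBusyException while the region's memstore
                        // is over its blocking limit (see the WARN entries in this log).
                        table.put(put);
                        break;
                    } catch (RegionTooBusyException busy) {
                        if (attempt == maxAttempts) {
                            throw busy;          // give up after the last attempt
                        }
                        Thread.sleep(backoffMs); // let the in-flight flush drain the memstore
                        backoffMs *= 2;          // exponential backoff
                    }
                }
            }
        }
    }
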
2024-11-22T19:22:48,194 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:48,194 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/A, priority=13, startTime=1732303367957; duration=0sec 2024-11-22T19:22:48,194 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:48,194 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:A 2024-11-22T19:22:48,449 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:48,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:48,474 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:48,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303428470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:48,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:48,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303428473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:48,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:48,481 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:48,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303428474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:48,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303428475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:48,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:48,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303428477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:48,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:48,553 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411222cbd4595f2b14f788186eda64e0d9618_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411222cbd4595f2b14f788186eda64e0d9618_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:48,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/bd57a5cae0e2480d9a49f762ce1416ec, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:48,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/bd57a5cae0e2480d9a49f762ce1416ec is 175, key is test_row_0/A:col10/1732303367326/Put/seqid=0 2024-11-22T19:22:48,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742098_1274 (size=31255) 2024-11-22T19:22:48,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:48,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303428576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:48,582 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:48,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303428580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:48,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:48,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303428582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:48,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:48,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303428583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:48,587 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:48,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303428583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:48,590 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/aa5d342bfe274cd39bd6ea9292a7808f as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/aa5d342bfe274cd39bd6ea9292a7808f 2024-11-22T19:22:48,597 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/C of 7de534ec4ea5964284edbc5ae1079040 into aa5d342bfe274cd39bd6ea9292a7808f(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:22:48,597 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:48,597 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/C, priority=13, startTime=1732303367957; duration=0sec 2024-11-22T19:22:48,598 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:48,598 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:C 2024-11-22T19:22:48,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-22T19:22:48,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:48,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303428780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:48,786 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:48,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303428785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:48,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:48,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303428788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:48,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:48,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303428789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:48,793 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:48,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303428790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:48,969 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=453, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/bd57a5cae0e2480d9a49f762ce1416ec 2024-11-22T19:22:48,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/efbed0933b244b63b85625f3da91f4d4 is 50, key is test_row_0/B:col10/1732303367326/Put/seqid=0 2024-11-22T19:22:49,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742099_1275 (size=12301) 2024-11-22T19:22:49,015 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=453 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/efbed0933b244b63b85625f3da91f4d4 2024-11-22T19:22:49,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/03f96079b22440e6b58d0f48d5c02964 is 50, key is test_row_0/C:col10/1732303367326/Put/seqid=0 2024-11-22T19:22:49,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742100_1276 (size=12301) 2024-11-22T19:22:49,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:49,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303429087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:49,089 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:49,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303429087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:49,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:49,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303429094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:49,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:49,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303429095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:49,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:49,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303429097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:49,442 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=453 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/03f96079b22440e6b58d0f48d5c02964 2024-11-22T19:22:49,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/bd57a5cae0e2480d9a49f762ce1416ec as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/bd57a5cae0e2480d9a49f762ce1416ec 2024-11-22T19:22:49,457 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/bd57a5cae0e2480d9a49f762ce1416ec, entries=150, sequenceid=453, filesize=30.5 K 2024-11-22T19:22:49,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/efbed0933b244b63b85625f3da91f4d4 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/efbed0933b244b63b85625f3da91f4d4 2024-11-22T19:22:49,464 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/efbed0933b244b63b85625f3da91f4d4, entries=150, sequenceid=453, filesize=12.0 K 2024-11-22T19:22:49,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/03f96079b22440e6b58d0f48d5c02964 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/03f96079b22440e6b58d0f48d5c02964 2024-11-22T19:22:49,472 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/03f96079b22440e6b58d0f48d5c02964, entries=150, sequenceid=453, filesize=12.0 K 2024-11-22T19:22:49,474 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 7de534ec4ea5964284edbc5ae1079040 in 1504ms, sequenceid=453, compaction requested=false 2024-11-22T19:22:49,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:49,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:49,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-11-22T19:22:49,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-11-22T19:22:49,478 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-22T19:22:49,478 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9890 sec 2024-11-22T19:22:49,483 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 2.0000 sec 2024-11-22T19:22:49,599 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-22T19:22:49,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:49,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:49,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:49,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:49,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:49,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:49,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:49,618 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122fb8f0e682bbb49eb88c0a14c2f166e1e_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303369595/Put/seqid=0 2024-11-22T19:22:49,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-22T19:22:49,620 INFO [Thread-740 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-22T19:22:49,622 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:22:49,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-11-22T19:22:49,623 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:22:49,624 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:22:49,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-22T19:22:49,624 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:22:49,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:49,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303429617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:49,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:49,628 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:49,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303429619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:49,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303429619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:49,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:49,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303429628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:49,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:49,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303429629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:49,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742101_1277 (size=17534) 2024-11-22T19:22:49,661 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:49,666 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122fb8f0e682bbb49eb88c0a14c2f166e1e_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122fb8f0e682bbb49eb88c0a14c2f166e1e_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:49,667 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/cbef80f9cbdf4bbb99489d677c479665, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:49,668 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/cbef80f9cbdf4bbb99489d677c479665 is 175, key is test_row_0/A:col10/1732303369595/Put/seqid=0 2024-11-22T19:22:49,690 DEBUG [Thread-743 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x517ff977 to 
127.0.0.1:57120 2024-11-22T19:22:49,690 DEBUG [Thread-741 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x247c0c93 to 127.0.0.1:57120 2024-11-22T19:22:49,691 DEBUG [Thread-743 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:22:49,691 DEBUG [Thread-741 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:22:49,695 DEBUG [Thread-745 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3448d233 to 127.0.0.1:57120 2024-11-22T19:22:49,695 DEBUG [Thread-745 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:22:49,695 DEBUG [Thread-747 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7a11164b to 127.0.0.1:57120 2024-11-22T19:22:49,695 DEBUG [Thread-747 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:22:49,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742102_1278 (size=48639) 2024-11-22T19:22:49,700 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=479, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/cbef80f9cbdf4bbb99489d677c479665 2024-11-22T19:22:49,711 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/789af020a2094db886d9c7e58f3798cf is 50, key is test_row_0/B:col10/1732303369595/Put/seqid=0 2024-11-22T19:22:49,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-22T19:22:49,730 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:49,730 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:49,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303429729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:49,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303429729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:49,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:49,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303429730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:49,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:49,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303429732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:49,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:49,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303429732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:49,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742103_1279 (size=12301) 2024-11-22T19:22:49,738 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=479 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/789af020a2094db886d9c7e58f3798cf 2024-11-22T19:22:49,750 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/0559dad426c440789ea6fc1372459b1e is 50, key is test_row_0/C:col10/1732303369595/Put/seqid=0 2024-11-22T19:22:49,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742104_1280 (size=12301) 2024-11-22T19:22:49,777 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:49,778 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-22T19:22:49,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:49,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
as already flushing 2024-11-22T19:22:49,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:49,778 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:49,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:49,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:49,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-22T19:22:49,931 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:49,931 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-22T19:22:49,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:49,931 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:49,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:49,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42480 deadline: 1732303429931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:49,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42494 deadline: 1732303429931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:49,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
as already flushing 2024-11-22T19:22:49,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:49,932 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:49,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:49,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:49,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:49,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42452 deadline: 1732303429932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:49,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:49,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42510 deadline: 1732303429934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:49,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:49,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42466 deadline: 1732303429935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:50,084 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:50,084 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-22T19:22:50,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:50,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. as already flushing 2024-11-22T19:22:50,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:50,085 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:50,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:50,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:50,166 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=479 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/0559dad426c440789ea6fc1372459b1e 2024-11-22T19:22:50,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/cbef80f9cbdf4bbb99489d677c479665 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/cbef80f9cbdf4bbb99489d677c479665 2024-11-22T19:22:50,183 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/cbef80f9cbdf4bbb99489d677c479665, entries=250, sequenceid=479, filesize=47.5 K 2024-11-22T19:22:50,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/789af020a2094db886d9c7e58f3798cf as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/789af020a2094db886d9c7e58f3798cf 2024-11-22T19:22:50,187 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/789af020a2094db886d9c7e58f3798cf, entries=150, sequenceid=479, filesize=12.0 K 2024-11-22T19:22:50,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/0559dad426c440789ea6fc1372459b1e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0559dad426c440789ea6fc1372459b1e 2024-11-22T19:22:50,192 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0559dad426c440789ea6fc1372459b1e, entries=150, sequenceid=479, filesize=12.0 K 2024-11-22T19:22:50,192 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 7de534ec4ea5964284edbc5ae1079040 in 593ms, sequenceid=479, compaction requested=true 2024-11-22T19:22:50,192 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:50,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:50,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:50,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:50,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:50,193 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:50,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7de534ec4ea5964284edbc5ae1079040:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:22:50,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:50,193 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:50,194 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 112137 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:50,194 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:50,194 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/B is initiating minor compaction (all files) 2024-11-22T19:22:50,194 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/A is initiating minor compaction (all files) 2024-11-22T19:22:50,194 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/B in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:50,194 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/A in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
2024-11-22T19:22:50,194 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/bb80f219fd5641479f7c5e3b973c8b59, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/efbed0933b244b63b85625f3da91f4d4, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/789af020a2094db886d9c7e58f3798cf] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=37.0 K 2024-11-22T19:22:50,194 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/3a41bb3b077f479292cfdf3e5f4d0a4d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/bd57a5cae0e2480d9a49f762ce1416ec, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/cbef80f9cbdf4bbb99489d677c479665] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=109.5 K 2024-11-22T19:22:50,194 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:50,194 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
files: [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/3a41bb3b077f479292cfdf3e5f4d0a4d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/bd57a5cae0e2480d9a49f762ce1416ec, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/cbef80f9cbdf4bbb99489d677c479665] 2024-11-22T19:22:50,194 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting bb80f219fd5641479f7c5e3b973c8b59, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1732303366688 2024-11-22T19:22:50,194 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a41bb3b077f479292cfdf3e5f4d0a4d, keycount=150, bloomtype=ROW, size=31.5 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1732303366688 2024-11-22T19:22:50,194 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting efbed0933b244b63b85625f3da91f4d4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=453, earliestPutTs=1732303367322 2024-11-22T19:22:50,195 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd57a5cae0e2480d9a49f762ce1416ec, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=453, earliestPutTs=1732303367322 2024-11-22T19:22:50,195 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 789af020a2094db886d9c7e58f3798cf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=479, earliestPutTs=1732303368473 2024-11-22T19:22:50,195 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting cbef80f9cbdf4bbb99489d677c479665, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=479, earliestPutTs=1732303368470 2024-11-22T19:22:50,205 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#B#compaction#231 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:50,205 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/6ce46bbcd1de49cf83eb602abae5e2fa is 50, key is test_row_0/B:col10/1732303369595/Put/seqid=0 2024-11-22T19:22:50,208 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:50,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742105_1281 (size=13391) 2024-11-22T19:22:50,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-22T19:22:50,228 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122095465c780ea46f0b383e9ae28060e0c_7de534ec4ea5964284edbc5ae1079040 store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:50,237 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:50,240 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-22T19:22:50,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:50,240 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-22T19:22:50,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:50,253 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
as already flushing 2024-11-22T19:22:50,254 DEBUG [Thread-730 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7cae6c5c to 127.0.0.1:57120 2024-11-22T19:22:50,254 DEBUG [Thread-730 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:22:50,264 DEBUG [Thread-738 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a4c53ed to 127.0.0.1:57120 2024-11-22T19:22:50,264 DEBUG [Thread-738 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:22:50,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:50,264 DEBUG [Thread-736 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x42e904d8 to 127.0.0.1:57120 2024-11-22T19:22:50,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:50,264 DEBUG [Thread-736 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:22:50,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:50,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:50,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:50,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:50,265 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122095465c780ea46f0b383e9ae28060e0c_7de534ec4ea5964284edbc5ae1079040, store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:50,265 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122095465c780ea46f0b383e9ae28060e0c_7de534ec4ea5964284edbc5ae1079040 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:50,267 DEBUG [Thread-732 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c820ef9 to 127.0.0.1:57120 2024-11-22T19:22:50,267 DEBUG [Thread-732 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:22:50,269 DEBUG [Thread-734 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0b44b1e5 to 127.0.0.1:57120 2024-11-22T19:22:50,269 DEBUG [Thread-734 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:22:50,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742106_1282 (size=4469) 2024-11-22T19:22:50,284 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#A#compaction#232 average throughput is 0.33 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:50,285 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/bd47152edfcf4737b0fd963f74a85195 is 175, key is test_row_0/A:col10/1732303369595/Put/seqid=0 2024-11-22T19:22:50,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112220a4837337e741cb9bada8841b054eb9_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303370234/Put/seqid=0 2024-11-22T19:22:50,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742107_1283 (size=32345) 2024-11-22T19:22:50,318 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/bd47152edfcf4737b0fd963f74a85195 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/bd47152edfcf4737b0fd963f74a85195 2024-11-22T19:22:50,325 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/A of 7de534ec4ea5964284edbc5ae1079040 into bd47152edfcf4737b0fd963f74a85195(size=31.6 K), total size for store is 31.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:50,325 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:50,325 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/A, priority=13, startTime=1732303370192; duration=0sec 2024-11-22T19:22:50,325 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:50,325 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:A 2024-11-22T19:22:50,325 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:50,326 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:50,326 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 7de534ec4ea5964284edbc5ae1079040/C is initiating minor compaction (all files) 2024-11-22T19:22:50,326 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7de534ec4ea5964284edbc5ae1079040/C in TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:50,326 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/aa5d342bfe274cd39bd6ea9292a7808f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/03f96079b22440e6b58d0f48d5c02964, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0559dad426c440789ea6fc1372459b1e] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp, totalSize=37.0 K 2024-11-22T19:22:50,327 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa5d342bfe274cd39bd6ea9292a7808f, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1732303366688 2024-11-22T19:22:50,327 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03f96079b22440e6b58d0f48d5c02964, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=453, earliestPutTs=1732303367322 2024-11-22T19:22:50,327 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0559dad426c440789ea6fc1372459b1e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=479, earliestPutTs=1732303368473 2024-11-22T19:22:50,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:41091 is added to blk_1073742108_1284 (size=12454) 2024-11-22T19:22:50,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:50,354 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7de534ec4ea5964284edbc5ae1079040#C#compaction#234 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:50,357 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/d8445dd683324c99afab51f2982cb0d0 is 50, key is test_row_0/C:col10/1732303369595/Put/seqid=0 2024-11-22T19:22:50,360 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112220a4837337e741cb9bada8841b054eb9_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112220a4837337e741cb9bada8841b054eb9_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:50,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/18882afdb3754ed6b673f15276928a80, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:50,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/18882afdb3754ed6b673f15276928a80 is 175, key is test_row_0/A:col10/1732303370234/Put/seqid=0 2024-11-22T19:22:50,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742109_1285 (size=13391) 2024-11-22T19:22:50,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742110_1286 (size=31255) 2024-11-22T19:22:50,403 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=496, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/18882afdb3754ed6b673f15276928a80 2024-11-22T19:22:50,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/ad1645a048c14f4eb7a9b4f6ead6d5e2 is 50, key is test_row_0/B:col10/1732303370234/Put/seqid=0 2024-11-22T19:22:50,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742111_1287 (size=12301) 2024-11-22T19:22:50,434 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=496 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/ad1645a048c14f4eb7a9b4f6ead6d5e2 2024-11-22T19:22:50,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/9cc69faabdd64e34b813769bfa451266 is 50, key is test_row_0/C:col10/1732303370234/Put/seqid=0 2024-11-22T19:22:50,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742112_1288 (size=12301) 2024-11-22T19:22:50,479 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=496 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/9cc69faabdd64e34b813769bfa451266 2024-11-22T19:22:50,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/18882afdb3754ed6b673f15276928a80 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/18882afdb3754ed6b673f15276928a80 2024-11-22T19:22:50,498 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/18882afdb3754ed6b673f15276928a80, entries=150, sequenceid=496, filesize=30.5 K 2024-11-22T19:22:50,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/ad1645a048c14f4eb7a9b4f6ead6d5e2 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/ad1645a048c14f4eb7a9b4f6ead6d5e2 2024-11-22T19:22:50,507 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/ad1645a048c14f4eb7a9b4f6ead6d5e2, entries=150, sequenceid=496, filesize=12.0 K 2024-11-22T19:22:50,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/9cc69faabdd64e34b813769bfa451266 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/9cc69faabdd64e34b813769bfa451266 2024-11-22T19:22:50,512 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/9cc69faabdd64e34b813769bfa451266, entries=150, sequenceid=496, filesize=12.0 K 2024-11-22T19:22:50,514 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=13.42 KB/13740 for 7de534ec4ea5964284edbc5ae1079040 in 274ms, sequenceid=496, compaction requested=false 2024-11-22T19:22:50,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:50,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 
2024-11-22T19:22:50,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-11-22T19:22:50,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-11-22T19:22:50,517 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-22T19:22:50,517 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 891 msec 2024-11-22T19:22:50,519 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 895 msec 2024-11-22T19:22:50,626 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/6ce46bbcd1de49cf83eb602abae5e2fa as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/6ce46bbcd1de49cf83eb602abae5e2fa 2024-11-22T19:22:50,631 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/B of 7de534ec4ea5964284edbc5ae1079040 into 6ce46bbcd1de49cf83eb602abae5e2fa(size=13.1 K), total size for store is 25.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:22:50,631 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:50,631 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/B, priority=13, startTime=1732303370193; duration=0sec 2024-11-22T19:22:50,631 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:50,631 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:B 2024-11-22T19:22:50,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-22T19:22:50,728 INFO [Thread-740 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-11-22T19:22:50,728 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-11-22T19:22:50,728 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 105
2024-11-22T19:22:50,728 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 69
2024-11-22T19:22:50,728 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 69
2024-11-22T19:22:50,728 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 83
2024-11-22T19:22:50,728 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 69
2024-11-22T19:22:50,728 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-22T19:22:50,729 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4744
2024-11-22T19:22:50,729 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4690
2024-11-22T19:22:50,729 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-22T19:22:50,729 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2066
2024-11-22T19:22:50,729 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6198 rows
2024-11-22T19:22:50,729 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2051
2024-11-22T19:22:50,729 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6153 rows
2024-11-22T19:22:50,729 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-22T19:22:50,729 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0ff872d8 to 127.0.0.1:57120
2024-11-22T19:22:50,729 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-22T19:22:50,732 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-22T19:22:50,733 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-22T19:22:50,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-22T19:22:50,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63
2024-11-22T19:22:50,738 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303370738"}]},"ts":"1732303370738"}
2024-11-22T19:22:50,739 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-22T19:22:50,742 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-11-22T19:22:50,743 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-11-22T19:22:50,746 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7de534ec4ea5964284edbc5ae1079040, UNASSIGN}]
2024-11-22T19:22:50,747 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure
table=TestAcidGuarantees, region=7de534ec4ea5964284edbc5ae1079040, UNASSIGN 2024-11-22T19:22:50,748 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=7de534ec4ea5964284edbc5ae1079040, regionState=CLOSING, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:22:50,748 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T19:22:50,749 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; CloseRegionProcedure 7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657}] 2024-11-22T19:22:50,793 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/d8445dd683324c99afab51f2982cb0d0 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/d8445dd683324c99afab51f2982cb0d0 2024-11-22T19:22:50,798 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7de534ec4ea5964284edbc5ae1079040/C of 7de534ec4ea5964284edbc5ae1079040 into d8445dd683324c99afab51f2982cb0d0(size=13.1 K), total size for store is 25.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:22:50,799 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:50,799 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040., storeName=7de534ec4ea5964284edbc5ae1079040/C, priority=13, startTime=1732303370193; duration=0sec 2024-11-22T19:22:50,799 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:50,799 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7de534ec4ea5964284edbc5ae1079040:C 2024-11-22T19:22:50,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-22T19:22:50,900 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:50,901 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(124): Close 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:50,901 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-22T19:22:50,901 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1681): Closing 7de534ec4ea5964284edbc5ae1079040, disabling compactions & flushes 2024-11-22T19:22:50,901 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1703): Closing region 
TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:50,901 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:50,901 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. after waiting 0 ms 2024-11-22T19:22:50,901 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:50,901 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(2837): Flushing 7de534ec4ea5964284edbc5ae1079040 3/3 column families, dataSize=13.42 KB heapSize=35.91 KB 2024-11-22T19:22:50,902 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=A 2024-11-22T19:22:50,902 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:50,902 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=B 2024-11-22T19:22:50,902 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:50,902 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7de534ec4ea5964284edbc5ae1079040, store=C 2024-11-22T19:22:50,902 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:50,915 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122c8807a2c450848a6a9b19381e18b8035_7de534ec4ea5964284edbc5ae1079040 is 50, key is test_row_0/A:col10/1732303370265/Put/seqid=0 2024-11-22T19:22:50,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742113_1289 (size=9914) 2024-11-22T19:22:50,921 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:50,926 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122c8807a2c450848a6a9b19381e18b8035_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122c8807a2c450848a6a9b19381e18b8035_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:50,927 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/4f5cb1dfeae44f039fb94750dc9d4fce, store: [table=TestAcidGuarantees family=A region=7de534ec4ea5964284edbc5ae1079040] 2024-11-22T19:22:50,928 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/4f5cb1dfeae44f039fb94750dc9d4fce is 175, key is test_row_0/A:col10/1732303370265/Put/seqid=0 2024-11-22T19:22:50,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742114_1290 (size=22561) 2024-11-22T19:22:50,934 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=504, memsize=4.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/4f5cb1dfeae44f039fb94750dc9d4fce 2024-11-22T19:22:50,943 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/b1caba6e58e641eb8d6a0041073d434a is 50, key is test_row_0/B:col10/1732303370265/Put/seqid=0 2024-11-22T19:22:50,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742115_1291 (size=9857) 2024-11-22T19:22:50,956 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=504 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/b1caba6e58e641eb8d6a0041073d434a 2024-11-22T19:22:50,965 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/f421248582b640c0b8a1f071fdf66209 is 50, key is test_row_0/C:col10/1732303370265/Put/seqid=0 2024-11-22T19:22:50,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742116_1292 (size=9857) 
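[Editor's note] The FLUSH of default:TestAcidGuarantees (procedure pid=61/62, completed above) and the disable that follows (pid=63 onward) correspond to ordinary client-side Admin calls. For reference only, the sketch below shows those two calls against the public HBase 2.x client API; the class name and the default configuration lookup are illustrative assumptions and are not taken from this test run.

// Hedged sketch (not from the test source): issue the same admin operations
// that the procedures logged above record: a table flush followed by a disable.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushThenDisableSketch {                     // hypothetical class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();     // assumes hbase-site.xml is on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.flush(table);        // server side: FlushTableProcedure / FlushRegionProcedure (pid=61/62)
      admin.disableTable(table); // server side: DisableTableProcedure and region UNASSIGN (pid=63 onward)
    }
  }
}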
2024-11-22T19:22:51,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-22T19:22:51,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-22T19:22:51,375 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=504 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/f421248582b640c0b8a1f071fdf66209 2024-11-22T19:22:51,380 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/A/4f5cb1dfeae44f039fb94750dc9d4fce as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/4f5cb1dfeae44f039fb94750dc9d4fce 2024-11-22T19:22:51,385 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/4f5cb1dfeae44f039fb94750dc9d4fce, entries=100, sequenceid=504, filesize=22.0 K 2024-11-22T19:22:51,386 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/B/b1caba6e58e641eb8d6a0041073d434a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/b1caba6e58e641eb8d6a0041073d434a 2024-11-22T19:22:51,393 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/b1caba6e58e641eb8d6a0041073d434a, entries=100, sequenceid=504, filesize=9.6 K 2024-11-22T19:22:51,394 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/.tmp/C/f421248582b640c0b8a1f071fdf66209 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/f421248582b640c0b8a1f071fdf66209 2024-11-22T19:22:51,398 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/f421248582b640c0b8a1f071fdf66209, entries=100, sequenceid=504, filesize=9.6 K 
2024-11-22T19:22:51,399 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~13.42 KB/13740, heapSize ~35.86 KB/36720, currentSize=0 B/0 for 7de534ec4ea5964284edbc5ae1079040 in 497ms, sequenceid=504, compaction requested=true 2024-11-22T19:22:51,399 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/c4287c8cadb84e19a9190c30d12c872c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/56a8e5cf3ece45b9a2dfb37945618dfb, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/d2dbcc17a124405c88d8e34d37e6c287, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/847941e62f1d4794b8929260e018d016, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/d655bb7d6af04cf8b1d088d08592c12f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/ec21766d595141f28e3dd3d947fcf7ba, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/b19cd3f9da9c4872a94a5030e9f91e75, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/a7535a5349e04d39bdea411c74d89cad, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/9e42beea5cad4f4a9288dd87a9daad10, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/73481d5f42c949dca590f9ee7e0a2bd1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/ac81ba1786bc4d398f9eb1b3db803451, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/5482a00a62e14cfd8474ad19b71e7cd5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/24c2a954cefb44c3b3e2d1dfbf02e16b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/6e31f037d7c04f99a96945dfc1557f59, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/2e6c95bad4b04a769057680403acab6b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/7a2fd55cd96d40e9a910b55b09b14a5a, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/f8d66935835e4ac89cfd9d8e68f3c9c0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/146df1bc568d4f81b3ce544c0942be3b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/db02aca7584e461cb242539fc5835a1c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/a863583632ee44a1b8cc343cbbb927ef, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/58017801bf4443ec8f7f13f65d33aa55, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/115ad87b126141669e859c7f880e0f27, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/2aaad0527624401489eceb4016d124ac, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/b1c28a196c7e4c128b9f2c23cd152a32, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/4e1c0195e7b24a0695aae0a65deb1798, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/db2d64d35c1b4923a79a6dd3491c7488, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/e8ff260e28bd4121a2a05c0182a092ec, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/b08dedcfb6de40cf948788c520bb4a3b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/4040bbfc24904de58a364d514b653cb9, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/3a41bb3b077f479292cfdf3e5f4d0a4d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/bd57a5cae0e2480d9a49f762ce1416ec, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/cbef80f9cbdf4bbb99489d677c479665] to archive 2024-11-22T19:22:51,401 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-22T19:22:51,402 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/c4287c8cadb84e19a9190c30d12c872c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/c4287c8cadb84e19a9190c30d12c872c 2024-11-22T19:22:51,405 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/56a8e5cf3ece45b9a2dfb37945618dfb to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/56a8e5cf3ece45b9a2dfb37945618dfb 2024-11-22T19:22:51,406 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/d2dbcc17a124405c88d8e34d37e6c287 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/d2dbcc17a124405c88d8e34d37e6c287 2024-11-22T19:22:51,407 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/847941e62f1d4794b8929260e018d016 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/847941e62f1d4794b8929260e018d016 2024-11-22T19:22:51,409 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/d655bb7d6af04cf8b1d088d08592c12f to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/d655bb7d6af04cf8b1d088d08592c12f 2024-11-22T19:22:51,410 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/ec21766d595141f28e3dd3d947fcf7ba to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/ec21766d595141f28e3dd3d947fcf7ba 2024-11-22T19:22:51,412 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/b19cd3f9da9c4872a94a5030e9f91e75 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/b19cd3f9da9c4872a94a5030e9f91e75 2024-11-22T19:22:51,413 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/a7535a5349e04d39bdea411c74d89cad to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/a7535a5349e04d39bdea411c74d89cad 2024-11-22T19:22:51,414 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/9e42beea5cad4f4a9288dd87a9daad10 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/9e42beea5cad4f4a9288dd87a9daad10 2024-11-22T19:22:51,415 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/73481d5f42c949dca590f9ee7e0a2bd1 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/73481d5f42c949dca590f9ee7e0a2bd1 2024-11-22T19:22:51,417 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/ac81ba1786bc4d398f9eb1b3db803451 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/ac81ba1786bc4d398f9eb1b3db803451 2024-11-22T19:22:51,418 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/5482a00a62e14cfd8474ad19b71e7cd5 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/5482a00a62e14cfd8474ad19b71e7cd5 2024-11-22T19:22:51,419 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/24c2a954cefb44c3b3e2d1dfbf02e16b to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/24c2a954cefb44c3b3e2d1dfbf02e16b 2024-11-22T19:22:51,421 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/6e31f037d7c04f99a96945dfc1557f59 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/6e31f037d7c04f99a96945dfc1557f59 2024-11-22T19:22:51,423 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/2e6c95bad4b04a769057680403acab6b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/2e6c95bad4b04a769057680403acab6b 2024-11-22T19:22:51,424 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/7a2fd55cd96d40e9a910b55b09b14a5a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/7a2fd55cd96d40e9a910b55b09b14a5a 2024-11-22T19:22:51,426 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/f8d66935835e4ac89cfd9d8e68f3c9c0 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/f8d66935835e4ac89cfd9d8e68f3c9c0 2024-11-22T19:22:51,427 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/146df1bc568d4f81b3ce544c0942be3b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/146df1bc568d4f81b3ce544c0942be3b 2024-11-22T19:22:51,429 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/db02aca7584e461cb242539fc5835a1c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/db02aca7584e461cb242539fc5835a1c 2024-11-22T19:22:51,430 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/a863583632ee44a1b8cc343cbbb927ef to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/a863583632ee44a1b8cc343cbbb927ef 2024-11-22T19:22:51,432 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/58017801bf4443ec8f7f13f65d33aa55 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/58017801bf4443ec8f7f13f65d33aa55 2024-11-22T19:22:51,433 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/115ad87b126141669e859c7f880e0f27 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/115ad87b126141669e859c7f880e0f27 2024-11-22T19:22:51,434 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/2aaad0527624401489eceb4016d124ac to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/2aaad0527624401489eceb4016d124ac 2024-11-22T19:22:51,435 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/b1c28a196c7e4c128b9f2c23cd152a32 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/b1c28a196c7e4c128b9f2c23cd152a32 2024-11-22T19:22:51,437 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/4e1c0195e7b24a0695aae0a65deb1798 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/4e1c0195e7b24a0695aae0a65deb1798 2024-11-22T19:22:51,439 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/db2d64d35c1b4923a79a6dd3491c7488 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/db2d64d35c1b4923a79a6dd3491c7488 2024-11-22T19:22:51,441 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/e8ff260e28bd4121a2a05c0182a092ec to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/e8ff260e28bd4121a2a05c0182a092ec 2024-11-22T19:22:51,442 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/b08dedcfb6de40cf948788c520bb4a3b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/b08dedcfb6de40cf948788c520bb4a3b 2024-11-22T19:22:51,443 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/4040bbfc24904de58a364d514b653cb9 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/4040bbfc24904de58a364d514b653cb9 2024-11-22T19:22:51,445 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/3a41bb3b077f479292cfdf3e5f4d0a4d to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/3a41bb3b077f479292cfdf3e5f4d0a4d 2024-11-22T19:22:51,446 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/bd57a5cae0e2480d9a49f762ce1416ec to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/bd57a5cae0e2480d9a49f762ce1416ec 2024-11-22T19:22:51,447 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/cbef80f9cbdf4bbb99489d677c479665 to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/cbef80f9cbdf4bbb99489d677c479665 2024-11-22T19:22:51,449 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/246682c29fe84b60aa60e104611b64d5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/ad4dc90a25d84c1d8b310f5967a9b294, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/2556c261bfea46babb30d0ece8d29881, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/e764817545544566812a601c1ab4ca02, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/2b44e321cd534844ac717483eaf16513, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/cf6a3dc0af6141fc8e08e5061fcb3870, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/14ce0edb2b824882b35c646cfd4499f5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/28c7dd371e0f4c888cdc583b457c56c2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/8d6a1e42ed3045e281c8582f8386083a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/c5ae991650424ef48e33bb8c6809b335, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/668c9ee44e144dcb80eb739eb1e6858f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/0897412de82749c39cc069e8aeb3bd81, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/8cfd0af0ab494356b38636c31795d74c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/07543b89e57449deb0a73920e2180d72, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/f5d2cf31e9314d2f83e7c3fdd2e18ce3, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/6175e4265bcb49f293a8546553150c95, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/33505ea5915f4e08b4a5f9be4425a217, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/e68952eb4a2244bcbc99c5a0faf6e0de, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/4375a22ea1104a6bba1d139278de201a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/4bc1a8dc70bd4f9f8c1abad7991bb97e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/6b730175e94a4df79f6bb2e0ce4d6b00, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/1ad4473a599b4614bdaa4432ef47ae5c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/858c01ea420d4deaa55c762995c4b995, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/8f0406f57f554733aefa2fafb56abc27, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/c9dac5bfeb974a60bb2f3d1087695bac, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/aa07623f9497400982aba428fd78463f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/49e3ec1f7bf647a7b8d67a8b582f9689, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/925bff0b45a046e2ad818c8a50ab425a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/bb80f219fd5641479f7c5e3b973c8b59, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/359e972e7ac44001acba7b771b99e55b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/efbed0933b244b63b85625f3da91f4d4, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/789af020a2094db886d9c7e58f3798cf] to archive 2024-11-22T19:22:51,450 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-22T19:22:51,453 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/246682c29fe84b60aa60e104611b64d5 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/246682c29fe84b60aa60e104611b64d5 2024-11-22T19:22:51,455 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/ad4dc90a25d84c1d8b310f5967a9b294 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/ad4dc90a25d84c1d8b310f5967a9b294 2024-11-22T19:22:51,457 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/2556c261bfea46babb30d0ece8d29881 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/2556c261bfea46babb30d0ece8d29881 2024-11-22T19:22:51,458 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/e764817545544566812a601c1ab4ca02 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/e764817545544566812a601c1ab4ca02 2024-11-22T19:22:51,460 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/2b44e321cd534844ac717483eaf16513 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/2b44e321cd534844ac717483eaf16513 2024-11-22T19:22:51,461 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/cf6a3dc0af6141fc8e08e5061fcb3870 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/cf6a3dc0af6141fc8e08e5061fcb3870 2024-11-22T19:22:51,463 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/14ce0edb2b824882b35c646cfd4499f5 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/14ce0edb2b824882b35c646cfd4499f5 2024-11-22T19:22:51,465 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/28c7dd371e0f4c888cdc583b457c56c2 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/28c7dd371e0f4c888cdc583b457c56c2 2024-11-22T19:22:51,466 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/8d6a1e42ed3045e281c8582f8386083a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/8d6a1e42ed3045e281c8582f8386083a 2024-11-22T19:22:51,468 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/c5ae991650424ef48e33bb8c6809b335 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/c5ae991650424ef48e33bb8c6809b335 2024-11-22T19:22:51,469 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/668c9ee44e144dcb80eb739eb1e6858f to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/668c9ee44e144dcb80eb739eb1e6858f 2024-11-22T19:22:51,470 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/0897412de82749c39cc069e8aeb3bd81 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/0897412de82749c39cc069e8aeb3bd81 2024-11-22T19:22:51,471 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/8cfd0af0ab494356b38636c31795d74c to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/8cfd0af0ab494356b38636c31795d74c 2024-11-22T19:22:51,473 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/07543b89e57449deb0a73920e2180d72 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/07543b89e57449deb0a73920e2180d72 2024-11-22T19:22:51,474 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/f5d2cf31e9314d2f83e7c3fdd2e18ce3 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/f5d2cf31e9314d2f83e7c3fdd2e18ce3 2024-11-22T19:22:51,475 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/6175e4265bcb49f293a8546553150c95 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/6175e4265bcb49f293a8546553150c95 2024-11-22T19:22:51,476 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/33505ea5915f4e08b4a5f9be4425a217 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/33505ea5915f4e08b4a5f9be4425a217 2024-11-22T19:22:51,477 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/e68952eb4a2244bcbc99c5a0faf6e0de to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/e68952eb4a2244bcbc99c5a0faf6e0de 2024-11-22T19:22:51,479 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/4375a22ea1104a6bba1d139278de201a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/4375a22ea1104a6bba1d139278de201a 2024-11-22T19:22:51,480 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/4bc1a8dc70bd4f9f8c1abad7991bb97e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/4bc1a8dc70bd4f9f8c1abad7991bb97e 2024-11-22T19:22:51,481 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/6b730175e94a4df79f6bb2e0ce4d6b00 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/6b730175e94a4df79f6bb2e0ce4d6b00 2024-11-22T19:22:51,482 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/1ad4473a599b4614bdaa4432ef47ae5c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/1ad4473a599b4614bdaa4432ef47ae5c 2024-11-22T19:22:51,484 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/858c01ea420d4deaa55c762995c4b995 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/858c01ea420d4deaa55c762995c4b995 2024-11-22T19:22:51,485 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/8f0406f57f554733aefa2fafb56abc27 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/8f0406f57f554733aefa2fafb56abc27 2024-11-22T19:22:51,487 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/c9dac5bfeb974a60bb2f3d1087695bac to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/c9dac5bfeb974a60bb2f3d1087695bac 2024-11-22T19:22:51,488 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/aa07623f9497400982aba428fd78463f to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/aa07623f9497400982aba428fd78463f 2024-11-22T19:22:51,490 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/49e3ec1f7bf647a7b8d67a8b582f9689 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/49e3ec1f7bf647a7b8d67a8b582f9689 2024-11-22T19:22:51,491 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/925bff0b45a046e2ad818c8a50ab425a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/925bff0b45a046e2ad818c8a50ab425a 2024-11-22T19:22:51,494 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/bb80f219fd5641479f7c5e3b973c8b59 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/bb80f219fd5641479f7c5e3b973c8b59 2024-11-22T19:22:51,495 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/359e972e7ac44001acba7b771b99e55b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/359e972e7ac44001acba7b771b99e55b 2024-11-22T19:22:51,498 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/efbed0933b244b63b85625f3da91f4d4 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/efbed0933b244b63b85625f3da91f4d4 2024-11-22T19:22:51,499 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/789af020a2094db886d9c7e58f3798cf to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/789af020a2094db886d9c7e58f3798cf 2024-11-22T19:22:51,508 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/4e68a64920954d9dbf1df3e52623b674, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0e4c26ae438442bdb73d5988945d3f68, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/46825ad2dec845048f6478348a843c30, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/b35792afbddc43e6b4d387a579b9a740, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/9c69aac7e2404c4badfad54968d8901d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/cd7c343690ce4263a4936ebe8b26c602, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/67c08d5d7baa4ca5b88186783c87d2a0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/2e4c4cf4cb6447d4986259514c57afe9, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0e5d541d4285432fb1b69513f42f9963, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/ffa3f9843fb14df49d8f45866ca9debe, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/d9f8154f795043a3bba02c8e28a1c498, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/c3754341a81e4778b526f51ff2b512de, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/3b2a5a660ea94c089d7d264be69a3123, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/f16f40ce771c4c878a8a9aabc4175394, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/bdfe693bd15d4f9d8028f454740602d2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/cb4095c8eea0432a8e55dd05b467fe0d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/581c741a40e74d8cbf4139684a778367, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/ddd6b1b6140c4057b08b84ba5bb09cac, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/e6dc0c95e02449549ba29b1e522d4bdf, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/6abe85f18dae4d35987672cca9a61ead, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/cc94112ec4cd4ba2bdbbd7a986fb29f9, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/febc62383c594099a652d83adbb102e9, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/ab5a7b4963dd43038d2547ab39b6bcc2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/7c8e94dd482f495da9c5e446983a6d48, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/e62c728c364c4854a4db0d0d661660ae, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/6aa3d29249e24f50a42f3e1b6d21440a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/9124c9f809f047f49b6346df80318275, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/ebd8ba035c78422f808a6677f7df90cc, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/aa5d342bfe274cd39bd6ea9292a7808f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0acbc5ae0aa248f8bc78863873b4e59f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/03f96079b22440e6b58d0f48d5c02964, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0559dad426c440789ea6fc1372459b1e] to archive 2024-11-22T19:22:51,510 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
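The same pattern repeats for family C: one "Moving the files [...] to archive" entry listing the compacted store files, followed by one "Archived from FileableStoreFile" entry per file. A hedged log-parsing sketch (the entry format is assumed from the lines above; the class name and the log-file argument are hypothetical) that cross-checks the two:

```java
// Illustrative sketch only: verifies that every store file listed in a
// "Moving the files [...] to archive" entry also appears as the source of a
// later "Archived from FileableStoreFile, <src> to <dst>" entry.
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.HashSet;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class ArchiveLogCheck {
    private static final Pattern MOVING =
        Pattern.compile("Moving the files \\[(.*?)\\] to archive", Pattern.DOTALL);
    private static final Pattern ARCHIVED =
        Pattern.compile("Archived from FileableStoreFile, (\\S+) to (\\S+)");

    public static void main(String[] args) throws Exception {
        // args[0]: path to a saved copy of this test log
        String log = new String(Files.readAllBytes(Paths.get(args[0])));

        Set<String> planned = new HashSet<>();
        Matcher m = MOVING.matcher(log);
        while (m.find()) {
            for (String p : m.group(1).split(",\\s*")) planned.add(p.trim());
        }

        Set<String> archived = new HashSet<>();
        Matcher a = ARCHIVED.matcher(log);
        while (a.find()) archived.add(a.group(1));

        planned.removeAll(archived);
        System.out.println(planned.isEmpty()
            ? "all planned store files were archived"
            : "missing archive entries for: " + planned);
    }
}
```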
2024-11-22T19:22:51,512 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/4e68a64920954d9dbf1df3e52623b674 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/4e68a64920954d9dbf1df3e52623b674 2024-11-22T19:22:51,515 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0e4c26ae438442bdb73d5988945d3f68 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0e4c26ae438442bdb73d5988945d3f68 2024-11-22T19:22:51,517 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/46825ad2dec845048f6478348a843c30 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/46825ad2dec845048f6478348a843c30 2024-11-22T19:22:51,521 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/b35792afbddc43e6b4d387a579b9a740 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/b35792afbddc43e6b4d387a579b9a740 2024-11-22T19:22:51,525 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/9c69aac7e2404c4badfad54968d8901d to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/9c69aac7e2404c4badfad54968d8901d 2024-11-22T19:22:51,528 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/cd7c343690ce4263a4936ebe8b26c602 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/cd7c343690ce4263a4936ebe8b26c602 2024-11-22T19:22:51,530 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/67c08d5d7baa4ca5b88186783c87d2a0 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/67c08d5d7baa4ca5b88186783c87d2a0 2024-11-22T19:22:51,531 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/2e4c4cf4cb6447d4986259514c57afe9 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/2e4c4cf4cb6447d4986259514c57afe9 2024-11-22T19:22:51,533 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0e5d541d4285432fb1b69513f42f9963 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0e5d541d4285432fb1b69513f42f9963 2024-11-22T19:22:51,534 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/ffa3f9843fb14df49d8f45866ca9debe to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/ffa3f9843fb14df49d8f45866ca9debe 2024-11-22T19:22:51,536 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/d9f8154f795043a3bba02c8e28a1c498 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/d9f8154f795043a3bba02c8e28a1c498 2024-11-22T19:22:51,539 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/c3754341a81e4778b526f51ff2b512de to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/c3754341a81e4778b526f51ff2b512de 2024-11-22T19:22:51,543 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/3b2a5a660ea94c089d7d264be69a3123 to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/3b2a5a660ea94c089d7d264be69a3123 2024-11-22T19:22:51,546 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/f16f40ce771c4c878a8a9aabc4175394 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/f16f40ce771c4c878a8a9aabc4175394 2024-11-22T19:22:51,548 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/bdfe693bd15d4f9d8028f454740602d2 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/bdfe693bd15d4f9d8028f454740602d2 2024-11-22T19:22:51,551 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/cb4095c8eea0432a8e55dd05b467fe0d to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/cb4095c8eea0432a8e55dd05b467fe0d 2024-11-22T19:22:51,554 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/581c741a40e74d8cbf4139684a778367 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/581c741a40e74d8cbf4139684a778367 2024-11-22T19:22:51,557 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/ddd6b1b6140c4057b08b84ba5bb09cac to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/ddd6b1b6140c4057b08b84ba5bb09cac 2024-11-22T19:22:51,559 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/e6dc0c95e02449549ba29b1e522d4bdf to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/e6dc0c95e02449549ba29b1e522d4bdf 2024-11-22T19:22:51,561 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/6abe85f18dae4d35987672cca9a61ead to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/6abe85f18dae4d35987672cca9a61ead 2024-11-22T19:22:51,564 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/cc94112ec4cd4ba2bdbbd7a986fb29f9 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/cc94112ec4cd4ba2bdbbd7a986fb29f9 2024-11-22T19:22:51,567 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/febc62383c594099a652d83adbb102e9 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/febc62383c594099a652d83adbb102e9 2024-11-22T19:22:51,569 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/ab5a7b4963dd43038d2547ab39b6bcc2 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/ab5a7b4963dd43038d2547ab39b6bcc2 2024-11-22T19:22:51,571 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/7c8e94dd482f495da9c5e446983a6d48 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/7c8e94dd482f495da9c5e446983a6d48 2024-11-22T19:22:51,574 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/e62c728c364c4854a4db0d0d661660ae to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/e62c728c364c4854a4db0d0d661660ae 2024-11-22T19:22:51,575 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/6aa3d29249e24f50a42f3e1b6d21440a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/6aa3d29249e24f50a42f3e1b6d21440a 2024-11-22T19:22:51,576 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/9124c9f809f047f49b6346df80318275 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/9124c9f809f047f49b6346df80318275 2024-11-22T19:22:51,577 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/ebd8ba035c78422f808a6677f7df90cc to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/ebd8ba035c78422f808a6677f7df90cc 2024-11-22T19:22:51,580 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/aa5d342bfe274cd39bd6ea9292a7808f to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/aa5d342bfe274cd39bd6ea9292a7808f 2024-11-22T19:22:51,582 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0acbc5ae0aa248f8bc78863873b4e59f to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0acbc5ae0aa248f8bc78863873b4e59f 2024-11-22T19:22:51,583 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/03f96079b22440e6b58d0f48d5c02964 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/03f96079b22440e6b58d0f48d5c02964 2024-11-22T19:22:51,584 DEBUG [StoreCloser-TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0559dad426c440789ea6fc1372459b1e to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/0559dad426c440789ea6fc1372459b1e 2024-11-22T19:22:51,591 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/recovered.edits/507.seqid, newMaxSeqId=507, maxSeqId=4 2024-11-22T19:22:51,592 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040. 2024-11-22T19:22:51,593 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1635): Region close journal for 7de534ec4ea5964284edbc5ae1079040: 2024-11-22T19:22:51,594 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(170): Closed 7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,596 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=7de534ec4ea5964284edbc5ae1079040, regionState=CLOSED 2024-11-22T19:22:51,598 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-11-22T19:22:51,598 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; CloseRegionProcedure 7de534ec4ea5964284edbc5ae1079040, server=a307a1377457,35917,1732303314657 in 848 msec 2024-11-22T19:22:51,600 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=64 2024-11-22T19:22:51,600 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=64, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7de534ec4ea5964284edbc5ae1079040, UNASSIGN in 852 msec 2024-11-22T19:22:51,602 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-11-22T19:22:51,602 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 858 msec 2024-11-22T19:22:51,605 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303371605"}]},"ts":"1732303371605"} 2024-11-22T19:22:51,606 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-22T19:22:51,608 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-22T19:22:51,610 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 876 msec 2024-11-22T19:22:51,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-22T19:22:51,841 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-11-22T19:22:51,842 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete 
TestAcidGuarantees 2024-11-22T19:22:51,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:22:51,844 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:22:51,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-22T19:22:51,844 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=67, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:22:51,848 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,851 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A, FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B, FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C, FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/recovered.edits] 2024-11-22T19:22:51,854 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/18882afdb3754ed6b673f15276928a80 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/18882afdb3754ed6b673f15276928a80 2024-11-22T19:22:51,855 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/4f5cb1dfeae44f039fb94750dc9d4fce to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/4f5cb1dfeae44f039fb94750dc9d4fce 2024-11-22T19:22:51,857 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/bd47152edfcf4737b0fd963f74a85195 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/A/bd47152edfcf4737b0fd963f74a85195 2024-11-22T19:22:51,860 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/6ce46bbcd1de49cf83eb602abae5e2fa to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/6ce46bbcd1de49cf83eb602abae5e2fa 2024-11-22T19:22:51,868 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/ad1645a048c14f4eb7a9b4f6ead6d5e2 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/ad1645a048c14f4eb7a9b4f6ead6d5e2 2024-11-22T19:22:51,870 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/b1caba6e58e641eb8d6a0041073d434a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/B/b1caba6e58e641eb8d6a0041073d434a 2024-11-22T19:22:51,879 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/9cc69faabdd64e34b813769bfa451266 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/9cc69faabdd64e34b813769bfa451266 2024-11-22T19:22:51,881 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/d8445dd683324c99afab51f2982cb0d0 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/d8445dd683324c99afab51f2982cb0d0 2024-11-22T19:22:51,882 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/f421248582b640c0b8a1f071fdf66209 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/C/f421248582b640c0b8a1f071fdf66209 2024-11-22T19:22:51,886 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/recovered.edits/507.seqid to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040/recovered.edits/507.seqid 2024-11-22T19:22:51,887 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,887 DEBUG [PEWorker-3 {}] 
procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-22T19:22:51,888 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-22T19:22:51,888 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-22T19:22:51,893 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112210976dec443a49dcafda42d6c10cf834_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112210976dec443a49dcafda42d6c10cf834_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,896 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112214fc1782112a4584919dddd3a24619fe_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112214fc1782112a4584919dddd3a24619fe_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,898 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112220a4837337e741cb9bada8841b054eb9_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112220a4837337e741cb9bada8841b054eb9_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,900 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411222ad06c0ec40141109122a13a453389db_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411222ad06c0ec40141109122a13a453389db_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,901 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411222cbd4595f2b14f788186eda64e0d9618_7de534ec4ea5964284edbc5ae1079040 to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411222cbd4595f2b14f788186eda64e0d9618_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,902 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411223eeba8f47b1f4f678653cef66459c67b_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411223eeba8f47b1f4f678653cef66459c67b_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,905 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122451989672b1b4d779c6e999dfec056e5_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122451989672b1b4d779c6e999dfec056e5_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,906 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122488ebecd78824cf9905b380449629eba_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122488ebecd78824cf9905b380449629eba_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,907 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122531931883dd649558afb469ee76598e4_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122531931883dd649558afb469ee76598e4_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,909 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122763a6a42209949a5936d936a5ffb65a1_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122763a6a42209949a5936d936a5ffb65a1_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,911 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112287012928142f462c8ed707bfcbf27dd1_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112287012928142f462c8ed707bfcbf27dd1_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,913 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411229a1c800b63984f8899e2871392b0829f_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411229a1c800b63984f8899e2871392b0829f_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,914 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a5edcc86766d4933850723d3cc33e413_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a5edcc86766d4933850723d3cc33e413_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,915 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122bc0853040956466b9ceedec19fe68200_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122bc0853040956466b9ceedec19fe68200_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,917 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122c8807a2c450848a6a9b19381e18b8035_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122c8807a2c450848a6a9b19381e18b8035_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,919 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122cc487af7eb6646e9981150611c4ce6b9_7de534ec4ea5964284edbc5ae1079040 to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122cc487af7eb6646e9981150611c4ce6b9_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,923 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122cdc169c48f6f40e8a15c5d18f0339f72_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122cdc169c48f6f40e8a15c5d18f0339f72_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,924 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122d7b0c02328464bd4b67e2715afddcd1b_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122d7b0c02328464bd4b67e2715afddcd1b_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,926 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122d93ceb2de759420e827d9479442d3fd7_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122d93ceb2de759420e827d9479442d3fd7_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,929 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122da356618f0464f7baa937e736ab80b4e_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122da356618f0464f7baa937e736ab80b4e_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,930 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122dd084e10123a4b4b87acc610e7a4e01d_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122dd084e10123a4b4b87acc610e7a4e01d_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,931 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122e91f531396344cc5af7e129a78e9777d_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122e91f531396344cc5af7e129a78e9777d_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,933 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f12f239d9ca74425b897985d85b56603_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f12f239d9ca74425b897985d85b56603_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,936 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f37ac0a738ab466989d55e773ee2851d_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f37ac0a738ab466989d55e773ee2851d_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,937 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122fa72cac49e0d404bace59c617f3c1c4a_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122fa72cac49e0d404bace59c617f3c1c4a_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,939 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122fb8f0e682bbb49eb88c0a14c2f166e1e_7de534ec4ea5964284edbc5ae1079040 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122fb8f0e682bbb49eb88c0a14c2f166e1e_7de534ec4ea5964284edbc5ae1079040 2024-11-22T19:22:51,939 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-22T19:22:51,942 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=67, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:22:51,945 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-22T19:22:51,948 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-22T19:22:51,961 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-22T19:22:51,964 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=67, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:22:51,964 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-22T19:22:51,965 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732303371964"}]},"ts":"9223372036854775807"} 2024-11-22T19:22:51,980 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-22T19:22:51,980 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 7de534ec4ea5964284edbc5ae1079040, NAME => 'TestAcidGuarantees,,1732303346564.7de534ec4ea5964284edbc5ae1079040.', STARTKEY => '', ENDKEY => ''}] 2024-11-22T19:22:51,980 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-22T19:22:51,981 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732303371980"}]},"ts":"9223372036854775807"} 2024-11-22T19:22:51,986 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-22T19:22:51,988 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=67, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:22:51,989 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 146 msec 2024-11-22T19:22:52,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-22T19:22:52,146 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-11-22T19:22:52,159 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=244 (was 238) Potentially hanging thread: hconnection-0x4db3c113-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4db3c113-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1527644496_22 at /127.0.0.1:52112 [Waiting for operation #206] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-647651687_22 at /127.0.0.1:42826 [Waiting for operation #556] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client 
DFSClient_NONMAPREDUCE_-647651687_22 at /127.0.0.1:42784 [Waiting for operation #582] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/cluster_36b2230a-feb7-e71f-de54-94974790eab9/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1527644496_22 at /127.0.0.1:43336 [Waiting for operation #1028] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/cluster_36b2230a-feb7-e71f-de54-94974790eab9/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4db3c113-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4db3c113-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=465 (was 454) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=737 (was 632) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=4169 (was 4353) 2024-11-22T19:22:52,170 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=244, OpenFileDescriptor=465, MaxFileDescriptor=1048576, SystemLoadAverage=737, ProcessCount=11, AvailableMemoryMB=4169 2024-11-22T19:22:52,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-22T19:22:52,172 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T19:22:52,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-22T19:22:52,173 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T19:22:52,174 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:52,174 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 68 2024-11-22T19:22:52,175 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T19:22:52,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-22T19:22:52,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742117_1293 (size=963) 2024-11-22T19:22:52,184 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => 
'', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982 2024-11-22T19:22:52,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742118_1294 (size=53) 2024-11-22T19:22:52,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-22T19:22:52,401 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T19:22:52,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-22T19:22:52,591 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T19:22:52,591 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 583533db2ec7fa9b81dbb4dd334629b0, disabling compactions & flushes 2024-11-22T19:22:52,591 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:52,591 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:52,591 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. after waiting 0 ms 2024-11-22T19:22:52,591 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:52,591 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
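[Editor's note] The CreateTableProcedure entries above log the full descriptor the client asked for: an ADAPTIVE compacting memstore at the table level and three identical families A, B and C. The following is a minimal sketch of how a client could request an equivalent table through the standard HBase 2.x Admin API; the connection setup, class name and variable names are illustrative assumptions and not the test's own code, but the table name, the 'hbase.hregion.compacting.memstore.type' attribute and the family settings mirror the descriptor logged above.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAcidTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();            // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder tableBuilder =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // mirrors TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        tableBuilder.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)                                // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW)                // BLOOMFILTER => 'ROW'
                .setBlocksize(64 * 1024)                          // BLOCKSIZE => '65536'
                .build());
      }
      admin.createTable(tableBuilder.build());                    // drives a CreateTableProcedure (pid=68 above) on the master
    }
  }
}
```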
2024-11-22T19:22:52,591 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:22:52,592 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T19:22:52,592 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732303372592"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732303372592"}]},"ts":"1732303372592"} 2024-11-22T19:22:52,593 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-22T19:22:52,594 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T19:22:52,594 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303372594"}]},"ts":"1732303372594"} 2024-11-22T19:22:52,595 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-22T19:22:52,600 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=583533db2ec7fa9b81dbb4dd334629b0, ASSIGN}] 2024-11-22T19:22:52,601 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=583533db2ec7fa9b81dbb4dd334629b0, ASSIGN 2024-11-22T19:22:52,602 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=583533db2ec7fa9b81dbb4dd334629b0, ASSIGN; state=OFFLINE, location=a307a1377457,35917,1732303314657; forceNewPlan=false, retain=false 2024-11-22T19:22:52,753 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=583533db2ec7fa9b81dbb4dd334629b0, regionState=OPENING, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:22:52,754 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; OpenRegionProcedure 583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657}] 2024-11-22T19:22:52,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-22T19:22:52,905 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:52,909 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
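[Editor's note] The repeated "Checking to see if procedure is done pid=68" entries are the master answering the client's polling while the create-table procedure runs. The helper below is a rough, free-standing sketch of that kind of client-side wait, not the actual HBaseAdmin$TableFuture internals; the class name, method name and timeout handling are assumptions for illustration, while Admin.isTableAvailable is the standard public API.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class WaitForTableSketch {
  /** Block until the named table is available, mirroring the client-side polling seen in the log. */
  static void waitForTable(Admin admin, TableName name, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!admin.isTableAvailable(name)) {          // the check the master keeps answering for pid=68
      if (System.currentTimeMillis() > deadline) {
        throw new IOException("Table " + name + " not available after " + timeoutMs + " ms");
      }
      Thread.sleep(200);                             // modest pause between polls
    }
  }
}
```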
2024-11-22T19:22:52,909 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7285): Opening region: {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} 2024-11-22T19:22:52,910 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:22:52,910 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T19:22:52,910 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7327): checking encryption for 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:22:52,910 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7330): checking classloading for 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:22:52,911 INFO [StoreOpener-583533db2ec7fa9b81dbb4dd334629b0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:22:52,912 INFO [StoreOpener-583533db2ec7fa9b81dbb4dd334629b0-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:22:52,913 INFO [StoreOpener-583533db2ec7fa9b81dbb4dd334629b0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 583533db2ec7fa9b81dbb4dd334629b0 columnFamilyName A 2024-11-22T19:22:52,913 DEBUG [StoreOpener-583533db2ec7fa9b81dbb4dd334629b0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:52,913 INFO [StoreOpener-583533db2ec7fa9b81dbb4dd334629b0-1 {}] regionserver.HStore(327): Store=583533db2ec7fa9b81dbb4dd334629b0/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:22:52,913 INFO [StoreOpener-583533db2ec7fa9b81dbb4dd334629b0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:22:52,914 INFO [StoreOpener-583533db2ec7fa9b81dbb4dd334629b0-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:22:52,915 INFO [StoreOpener-583533db2ec7fa9b81dbb4dd334629b0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 583533db2ec7fa9b81dbb4dd334629b0 columnFamilyName B 2024-11-22T19:22:52,915 DEBUG [StoreOpener-583533db2ec7fa9b81dbb4dd334629b0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:52,915 INFO [StoreOpener-583533db2ec7fa9b81dbb4dd334629b0-1 {}] regionserver.HStore(327): Store=583533db2ec7fa9b81dbb4dd334629b0/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:22:52,915 INFO [StoreOpener-583533db2ec7fa9b81dbb4dd334629b0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:22:52,917 INFO [StoreOpener-583533db2ec7fa9b81dbb4dd334629b0-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:22:52,917 INFO [StoreOpener-583533db2ec7fa9b81dbb4dd334629b0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 583533db2ec7fa9b81dbb4dd334629b0 columnFamilyName C 2024-11-22T19:22:52,917 DEBUG [StoreOpener-583533db2ec7fa9b81dbb4dd334629b0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:22:52,917 INFO [StoreOpener-583533db2ec7fa9b81dbb4dd334629b0-1 {}] regionserver.HStore(327): Store=583533db2ec7fa9b81dbb4dd334629b0/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:22:52,918 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:52,919 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:22:52,919 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:22:52,921 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T19:22:52,922 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1085): writing seq id for 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:22:52,929 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T19:22:52,929 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1102): Opened 583533db2ec7fa9b81dbb4dd334629b0; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58722229, jitterRate=-0.12497060000896454}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T19:22:52,930 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1001): Region open journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:22:52,931 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., pid=70, masterSystemTime=1732303372905 2024-11-22T19:22:52,934 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:52,934 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
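[Editor's note] With the region now open, the test (testGetAtomicity per the ResourceChecker entry above) issues writes that touch all three families and checks that a reader never observes a partial row. Below is a hedged sketch of that kind of check using only the plain client API; it is not the TestAcidGuarantees harness itself, and the class name, row key handling, qualifier and value scheme are invented for illustration.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AtomicityCheckSketch {
  static void checkRowAtomicity(Connection conn, byte[] row) throws Exception {
    byte[][] families = {Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C")};
    byte[] qualifier = Bytes.toBytes("q");                        // illustrative qualifier
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // One Put spanning all three families: HBase applies it atomically within the row.
      byte[] value = Bytes.toBytes(System.currentTimeMillis());
      Put put = new Put(row);
      for (byte[] family : families) {
        put.addColumn(family, qualifier, value);
      }
      table.put(put);

      // A later (or concurrent) Get should see the same value in every family.
      Result result = table.get(new Get(row));
      byte[] expected = result.getValue(families[0], qualifier);
      for (byte[] family : families) {
        if (!Bytes.equals(expected, result.getValue(family, qualifier))) {
          throw new AssertionError("Partial row visible for " + Bytes.toString(row));
        }
      }
    }
  }
}
```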
2024-11-22T19:22:52,934 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=583533db2ec7fa9b81dbb4dd334629b0, regionState=OPEN, openSeqNum=2, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:22:52,937 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-22T19:22:52,938 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; OpenRegionProcedure 583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 in 181 msec 2024-11-22T19:22:52,940 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=69, resume processing ppid=68 2024-11-22T19:22:52,940 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=583533db2ec7fa9b81dbb4dd334629b0, ASSIGN in 337 msec 2024-11-22T19:22:52,941 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T19:22:52,941 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303372941"}]},"ts":"1732303372941"} 2024-11-22T19:22:52,942 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-22T19:22:52,945 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T19:22:52,946 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 773 msec 2024-11-22T19:22:53,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-22T19:22:53,284 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 68 completed 2024-11-22T19:22:53,285 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17b6adc5 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a569490 2024-11-22T19:22:53,289 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c1ac389, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:53,291 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:53,292 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55800, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:53,296 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T19:22:53,298 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43222, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T19:22:53,300 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x669e1999 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6862e3ce 2024-11-22T19:22:53,303 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28e73c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:53,305 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72aa9ee5 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d296fed 2024-11-22T19:22:53,309 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c480dfb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:53,310 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4ec09297 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8d0caa5 2024-11-22T19:22:53,313 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34cb3991, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:53,314 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4dfb20f6 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@43f04e0e 2024-11-22T19:22:53,317 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e9ae050, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:53,319 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17cf7fc0 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@560ec309 2024-11-22T19:22:53,322 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fef31f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:53,323 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5886c0f2 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@eb04aeb 2024-11-22T19:22:53,329 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72537a47, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:53,330 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x66e575aa to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6a0e9c8f 2024-11-22T19:22:53,339 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36642cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:53,340 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x131ceb8f to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d68f787 2024-11-22T19:22:53,350 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c299cfb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:53,352 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5a78bf6d to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@10e6bf6a 2024-11-22T19:22:53,362 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@605827c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:53,363 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x328852db to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1730a60f 2024-11-22T19:22:53,369 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3677bd4f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:22:53,384 DEBUG [hconnection-0x280c8c04-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:53,385 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55804, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:53,386 DEBUG [hconnection-0x6425c3d2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:53,387 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55808, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:53,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:22:53,398 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T19:22:53,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:22:53,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:53,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:22:53,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:53,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:22:53,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:53,400 DEBUG [hconnection-0x7b7d4f2e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:53,401 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55820, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:53,425 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:22:53,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-11-22T19:22:53,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-22T19:22:53,427 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:22:53,428 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:22:53,428 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:22:53,466 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:53,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303433466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:53,468 DEBUG [hconnection-0x4cb3ba0b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:53,470 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55836, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:53,480 DEBUG [hconnection-0x4220ef3d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:53,481 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55844, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:53,484 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:53,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303433483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:53,495 DEBUG [hconnection-0x7490d2c0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:53,496 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55852, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:53,501 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:53,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55852 deadline: 1732303433499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:53,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/e998fc66c6704f2aa6ba1f3281eb20ff is 50, key is test_row_0/A:col10/1732303373390/Put/seqid=0 2024-11-22T19:22:53,512 DEBUG [hconnection-0x77bff0fe-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:53,513 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55854, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:53,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:53,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1732303433517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:53,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-22T19:22:53,541 DEBUG [hconnection-0xa1815ef-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:53,543 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55860, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:53,572 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:53,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303433568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:53,577 DEBUG [hconnection-0x58bccebd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:53,580 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55864, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:53,584 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:53,584 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-22T19:22:53,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:53,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:22:53,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:53,585 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:53,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:53,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:53,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:53,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303433588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:53,607 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:53,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55852 deadline: 1732303433603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:53,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742119_1295 (size=12001) 2024-11-22T19:22:53,623 DEBUG [hconnection-0x55d88a5d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:22:53,625 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55878, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:22:53,627 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:53,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1732303433625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:53,629 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:53,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303433626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:53,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-22T19:22:53,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:53,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303433730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:53,737 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:53,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-22T19:22:53,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:53,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:22:53,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:53,738 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:53,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:53,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:53,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:53,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303433774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:53,796 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:53,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303433792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:53,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:53,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55852 deadline: 1732303433810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:53,834 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:53,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1732303433830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:53,890 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:53,891 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-22T19:22:53,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:53,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:22:53,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:53,892 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:53,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:53,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:53,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:53,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303433935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:54,018 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/e998fc66c6704f2aa6ba1f3281eb20ff 2024-11-22T19:22:54,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-22T19:22:54,043 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:54,043 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-22T19:22:54,044 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:54,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:22:54,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:54,044 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:54,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:54,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:54,056 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/cc17d7d28cbf43b0bb2ac8579ac76004 is 50, key is test_row_0/B:col10/1732303373390/Put/seqid=0 2024-11-22T19:22:54,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742120_1296 (size=12001) 2024-11-22T19:22:54,080 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/cc17d7d28cbf43b0bb2ac8579ac76004 2024-11-22T19:22:54,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:54,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303434079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:54,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:54,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303434100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:54,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:54,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55852 deadline: 1732303434121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:54,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:54,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1732303434137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:54,155 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/07d11fc1ba604d218aa49aca48d5db91 is 50, key is test_row_0/C:col10/1732303373390/Put/seqid=0 2024-11-22T19:22:54,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742121_1297 (size=12001) 2024-11-22T19:22:54,197 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:54,198 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-22T19:22:54,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:54,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:22:54,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
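The RegionTooBusyException entries above show client puts being rejected because the region's memstore has reached its blocking limit (512.0 K) while the flush is still in progress. A hypothetical client-side helper that backs off and retries a single Put under that condition is sketched below; PutWithBackoff, the row and column values, and the backoff numbers are all illustrative, and the sketch assumes the exception surfaces to the caller rather than being absorbed by the client's own retry policy.

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class PutWithBackoff {
        // Hypothetical helper: retry one Put while the region is blocking writes
        // because its memstore is over the limit (the condition warned about above).
        static void putWithBackoff(Connection conn, int maxAttempts) throws Exception {
            try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 1; attempt <= maxAttempts; attempt++) {
                    try {
                        table.put(put);
                        return;                  // write accepted once the flush freed memstore space
                    } catch (RegionTooBusyException e) {
                        Thread.sleep(backoffMs); // back off and let MemStoreFlusher catch up
                        backoffMs = Math.min(backoffMs * 2, 5_000);
                    }
                }
                throw new IllegalStateException("put still rejected after " + maxAttempts + " attempts");
            }
        }
    }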
2024-11-22T19:22:54,198 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:54,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:54,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:54,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:54,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303434240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:54,351 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:54,351 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-22T19:22:54,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:54,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:22:54,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:54,352 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
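The 512.0 K blocking limit in these warnings is the per-region memstore flush size multiplied by the block multiplier; once the memstore exceeds that product, writes are refused until a flush frees space. The sketch below shows the two standard settings involved. The concrete values (128 K and a multiplier of 4) are assumptions chosen only so their product matches the 512 K seen here, not values read from this test's configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitConfig {
        // Assumed values: 128 K flush size x multiplier 4 = the 512 K blocking limit
        // reported in the RegionTooBusyException messages above.
        public static Configuration smallFlushConf() {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // per-region flush trigger
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4 x flush size
            return conf;
        }
    }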
2024-11-22T19:22:54,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:54,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:54,505 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:54,505 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-22T19:22:54,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:54,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:22:54,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:54,506 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:54,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:54,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:54,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-22T19:22:54,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:54,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303434585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:54,588 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/07d11fc1ba604d218aa49aca48d5db91 2024-11-22T19:22:54,596 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/e998fc66c6704f2aa6ba1f3281eb20ff as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/e998fc66c6704f2aa6ba1f3281eb20ff 2024-11-22T19:22:54,604 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:54,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303434604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:54,609 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/e998fc66c6704f2aa6ba1f3281eb20ff, entries=150, sequenceid=12, filesize=11.7 K 2024-11-22T19:22:54,614 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/cc17d7d28cbf43b0bb2ac8579ac76004 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/cc17d7d28cbf43b0bb2ac8579ac76004 2024-11-22T19:22:54,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:54,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55852 deadline: 1732303434626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:54,638 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/cc17d7d28cbf43b0bb2ac8579ac76004, entries=150, sequenceid=12, filesize=11.7 K 2024-11-22T19:22:54,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/07d11fc1ba604d218aa49aca48d5db91 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/07d11fc1ba604d218aa49aca48d5db91 2024-11-22T19:22:54,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:54,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1732303434641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:54,645 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/07d11fc1ba604d218aa49aca48d5db91, entries=150, sequenceid=12, filesize=11.7 K 2024-11-22T19:22:54,652 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 583533db2ec7fa9b81dbb4dd334629b0 in 1253ms, sequenceid=12, compaction requested=false 2024-11-22T19:22:54,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:22:54,658 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:54,659 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-22T19:22:54,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
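From here the handler that picked up pid=72 performs the actual flush: each of the region's three stores (A, B, C) is snapshotted, written to a file under .tmp/, and then committed as a new store file at sequenceid=37. As background, a minimal sketch of creating a table with those three column families through the public API follows; the class name CreateTestTable is invented, and it omits whatever additional descriptors (for example in-memory compaction settings) the real test applies.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTestTable {
        // Creates a table with the three column families (A, B, C) whose stores the
        // flush below writes out as separate files before committing them.
        static void createTable(Admin admin) throws IOException {
            TableDescriptorBuilder builder =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
            for (String family : new String[] {"A", "B", "C"}) {
                builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
            }
            admin.createTable(builder.build());
        }
    }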
2024-11-22T19:22:54,659 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T19:22:54,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:22:54,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:54,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:22:54,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:54,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:22:54,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:54,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/d6f4973880e44a02a1883d142ca29ad2 is 50, key is test_row_0/A:col10/1732303373463/Put/seqid=0 2024-11-22T19:22:54,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742122_1298 (size=12001) 2024-11-22T19:22:54,698 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/d6f4973880e44a02a1883d142ca29ad2 2024-11-22T19:22:54,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/31484cfc2c08467c842ddc2024d38808 is 50, key is test_row_0/B:col10/1732303373463/Put/seqid=0 2024-11-22T19:22:54,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
as already flushing 2024-11-22T19:22:54,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:22:54,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742123_1299 (size=12001) 2024-11-22T19:22:54,779 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/31484cfc2c08467c842ddc2024d38808 2024-11-22T19:22:54,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/eb21e6b4e9254f83a2e413b783a89d11 is 50, key is test_row_0/C:col10/1732303373463/Put/seqid=0 2024-11-22T19:22:54,798 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:54,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303434796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:54,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742124_1300 (size=12001) 2024-11-22T19:22:54,820 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/eb21e6b4e9254f83a2e413b783a89d11 2024-11-22T19:22:54,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/d6f4973880e44a02a1883d142ca29ad2 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/d6f4973880e44a02a1883d142ca29ad2 2024-11-22T19:22:54,832 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/d6f4973880e44a02a1883d142ca29ad2, entries=150, sequenceid=37, filesize=11.7 K 2024-11-22T19:22:54,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/31484cfc2c08467c842ddc2024d38808 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/31484cfc2c08467c842ddc2024d38808 2024-11-22T19:22:54,838 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/31484cfc2c08467c842ddc2024d38808, entries=150, sequenceid=37, filesize=11.7 K 2024-11-22T19:22:54,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/eb21e6b4e9254f83a2e413b783a89d11 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/eb21e6b4e9254f83a2e413b783a89d11 2024-11-22T19:22:54,845 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/eb21e6b4e9254f83a2e413b783a89d11, entries=150, sequenceid=37, filesize=11.7 K 2024-11-22T19:22:54,846 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 583533db2ec7fa9b81dbb4dd334629b0 in 187ms, sequenceid=37, compaction requested=false 2024-11-22T19:22:54,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:22:54,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:54,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-22T19:22:54,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-11-22T19:22:54,856 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-22T19:22:54,857 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4200 sec 2024-11-22T19:22:54,858 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.4320 sec 2024-11-22T19:22:54,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:22:54,904 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-22T19:22:54,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:22:54,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:54,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:22:54,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:54,904 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:22:54,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:54,913 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/4b9430fa6ba14e4091144bdbff542e87 is 50, key is test_row_0/A:col10/1732303374795/Put/seqid=0 2024-11-22T19:22:54,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742125_1301 (size=12001) 2024-11-22T19:22:54,944 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/4b9430fa6ba14e4091144bdbff542e87 2024-11-22T19:22:54,974 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/38e95111fedb448298bf4114b1092be2 is 50, key is test_row_0/B:col10/1732303374795/Put/seqid=0 2024-11-22T19:22:55,001 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:55,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303434999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:55,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742126_1302 (size=12001) 2024-11-22T19:22:55,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:55,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303435102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:55,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:55,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303435305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:55,407 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/38e95111fedb448298bf4114b1092be2 2024-11-22T19:22:55,426 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/a0050d7dd4dd4d8ba373c84b36d7ce96 is 50, key is test_row_0/C:col10/1732303374795/Put/seqid=0 2024-11-22T19:22:55,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742127_1303 (size=12001) 2024-11-22T19:22:55,498 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/a0050d7dd4dd4d8ba373c84b36d7ce96 2024-11-22T19:22:55,505 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/4b9430fa6ba14e4091144bdbff542e87 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/4b9430fa6ba14e4091144bdbff542e87 2024-11-22T19:22:55,512 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/4b9430fa6ba14e4091144bdbff542e87, entries=150, sequenceid=49, filesize=11.7 K 2024-11-22T19:22:55,513 DEBUG [MemStoreFlusher.0 
{}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/38e95111fedb448298bf4114b1092be2 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/38e95111fedb448298bf4114b1092be2 2024-11-22T19:22:55,519 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/38e95111fedb448298bf4114b1092be2, entries=150, sequenceid=49, filesize=11.7 K 2024-11-22T19:22:55,520 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/a0050d7dd4dd4d8ba373c84b36d7ce96 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/a0050d7dd4dd4d8ba373c84b36d7ce96 2024-11-22T19:22:55,532 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/a0050d7dd4dd4d8ba373c84b36d7ce96, entries=150, sequenceid=49, filesize=11.7 K 2024-11-22T19:22:55,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-22T19:22:55,533 INFO [Thread-1335 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-22T19:22:55,533 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 583533db2ec7fa9b81dbb4dd334629b0 in 630ms, sequenceid=49, compaction requested=true 2024-11-22T19:22:55,533 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:22:55,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:55,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:55,533 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:55,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:55,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:55,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:C, priority=-2147483648, current under compaction 
store size is 3 2024-11-22T19:22:55,533 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:55,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:55,534 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:22:55,534 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:55,534 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/A is initiating minor compaction (all files) 2024-11-22T19:22:55,534 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/A in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:55,535 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/e998fc66c6704f2aa6ba1f3281eb20ff, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/d6f4973880e44a02a1883d142ca29ad2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/4b9430fa6ba14e4091144bdbff542e87] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=35.2 K 2024-11-22T19:22:55,535 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e998fc66c6704f2aa6ba1f3281eb20ff, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732303373390 2024-11-22T19:22:55,535 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:55,535 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d6f4973880e44a02a1883d142ca29ad2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732303373453 2024-11-22T19:22:55,536 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/B is initiating minor compaction (all files) 2024-11-22T19:22:55,536 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/B in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
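[Editor's note] The entries above show ExploringCompactionPolicy picking all 3 eligible store files (~11.7 K each, 36003 bytes total) for a minor compaction of store A. The following is a minimal standalone sketch of that selection idea only, not HBase's actual code; the method names, the ratio value, and the min/max file bounds are illustrative assumptions.

import java.util.ArrayList;
import java.util.List;

// Simplified illustration of the "exploring" selection seen in the log:
// scan contiguous windows of store-file sizes, keep only windows where every
// file is within `ratio` of the sum of the other files in the window, and
// prefer the window with the most files, then the smallest total size.
public class ExploringSelectionSketch {

    static List<Long> select(List<Long> sizes, int minFiles, int maxFiles, double ratio) {
        List<Long> best = new ArrayList<>();
        long bestTotal = Long.MAX_VALUE;
        for (int start = 0; start < sizes.size(); start++) {
            for (int end = start + minFiles; end <= Math.min(sizes.size(), start + maxFiles); end++) {
                List<Long> window = sizes.subList(start, end);
                long total = window.stream().mapToLong(Long::longValue).sum();
                boolean inRatio = window.stream().allMatch(s -> s <= ratio * (total - s));
                if (!inRatio) continue;
                if (window.size() > best.size()
                        || (window.size() == best.size() && total < bestTotal)) {
                    best = new ArrayList<>(window);
                    bestTotal = total;
                }
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // Three ~11.7 K flush files, as in the log entry above (36003 bytes total).
        System.out.println(select(List.of(12001L, 12001L, 12001L), 3, 10, 1.2));
    }
}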
2024-11-22T19:22:55,536 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/cc17d7d28cbf43b0bb2ac8579ac76004, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/31484cfc2c08467c842ddc2024d38808, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/38e95111fedb448298bf4114b1092be2] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=35.2 K 2024-11-22T19:22:55,536 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b9430fa6ba14e4091144bdbff542e87, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732303374784 2024-11-22T19:22:55,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-11-22T19:22:55,536 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting cc17d7d28cbf43b0bb2ac8579ac76004, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732303373390 2024-11-22T19:22:55,537 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 31484cfc2c08467c842ddc2024d38808, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732303373453 2024-11-22T19:22:55,537 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 38e95111fedb448298bf4114b1092be2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732303374784 2024-11-22T19:22:55,537 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:22:55,538 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:22:55,538 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:22:55,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-22T19:22:55,557 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#A#compaction#249 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:55,558 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/8284d7cd94a24778ad876f7ff1cccfd6 is 50, key is test_row_0/A:col10/1732303374795/Put/seqid=0 2024-11-22T19:22:55,574 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#B#compaction#250 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:55,574 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/563fcfc545564256a9a6cf57aff71f63 is 50, key is test_row_0/B:col10/1732303374795/Put/seqid=0 2024-11-22T19:22:55,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:22:55,599 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T19:22:55,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:22:55,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:55,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:22:55,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:55,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:22:55,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:55,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742128_1304 (size=12104) 2024-11-22T19:22:55,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:55,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303435619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:55,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:55,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:55,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303435620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:55,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303435620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:55,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:55,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55852 deadline: 1732303435631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:55,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742129_1305 (size=12104) 2024-11-22T19:22:55,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-22T19:22:55,641 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/563fcfc545564256a9a6cf57aff71f63 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/563fcfc545564256a9a6cf57aff71f63 2024-11-22T19:22:55,649 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/d6a937864e2c47acbae6dfb1f4c22134 is 50, key is test_row_0/A:col10/1732303375597/Put/seqid=0 2024-11-22T19:22:55,652 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/B of 583533db2ec7fa9b81dbb4dd334629b0 into 563fcfc545564256a9a6cf57aff71f63(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
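[Editor's note] The repeated RegionTooBusyException entries are the region server rejecting Mutate calls while the memstore sits above its blocking limit; writers are expected to back off and retry. The sketch below shows an explicit client-side back-off loop using the standard HBase client API. It is an illustration under assumptions: the table name matches the test, the retry bounds are arbitrary, and in practice the stock client already retries this exception internally, so it may only surface to application code (possibly wrapped) after the client's own retries are exhausted.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative writer that backs off when the region reports it is too busy,
// mirroring the "Over memstore limit" rejections in the log above.
public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                       // illustrative starting back-off
            for (int attempt = 0; attempt < 10; attempt++) {
                try {
                    table.put(put);
                    return;                             // write accepted
                } catch (RegionTooBusyException busy) {
                    Thread.sleep(backoffMs);            // region over memstore limit: wait and retry
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
            throw new IllegalStateException("region stayed too busy after retries");
        }
    }
}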
2024-11-22T19:22:55,652 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:22:55,652 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/B, priority=13, startTime=1732303375533; duration=0sec 2024-11-22T19:22:55,652 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:55,652 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:B 2024-11-22T19:22:55,652 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:22:55,653 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:55,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1732303435651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:55,653 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:22:55,654 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/C is initiating minor compaction (all files) 2024-11-22T19:22:55,654 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/C in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:55,654 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/07d11fc1ba604d218aa49aca48d5db91, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/eb21e6b4e9254f83a2e413b783a89d11, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/a0050d7dd4dd4d8ba373c84b36d7ce96] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=35.2 K 2024-11-22T19:22:55,654 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 07d11fc1ba604d218aa49aca48d5db91, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732303373390 2024-11-22T19:22:55,655 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting eb21e6b4e9254f83a2e413b783a89d11, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732303373453 2024-11-22T19:22:55,655 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting a0050d7dd4dd4d8ba373c84b36d7ce96, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732303374784 2024-11-22T19:22:55,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742130_1306 (size=14341) 2024-11-22T19:22:55,667 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#C#compaction#252 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:55,668 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/7ba98a3864914cab8cfee1d6e2402b76 is 50, key is test_row_0/C:col10/1732303374795/Put/seqid=0 2024-11-22T19:22:55,669 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/d6a937864e2c47acbae6dfb1f4c22134 2024-11-22T19:22:55,690 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/91d1d9a9867642e4b6fa1f661cdf9bb5 is 50, key is test_row_0/B:col10/1732303375597/Put/seqid=0 2024-11-22T19:22:55,690 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:55,691 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-22T19:22:55,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:55,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:22:55,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:55,691 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:55,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:55,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:55,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742131_1307 (size=12104) 2024-11-22T19:22:55,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742132_1308 (size=12001) 2024-11-22T19:22:55,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:55,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303435724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:55,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:55,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303435726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:55,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:55,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303435726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:55,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-22T19:22:55,844 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:55,845 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-22T19:22:55,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:55,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:22:55,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:55,845 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
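[Editor's note] The pid=74 errors above are not data loss: the FlushRegionProcedure dispatched by the master lands while the region is already mid-flush ("NOT flushing ... as already flushing"), so the callable reports failure and the master re-dispatches it until the in-progress flush finishes. The admin-side call that kicks off this FlushTableProcedure/FlushRegionProcedure chain looks roughly like the sketch below (standard Admin API; the table name matches the test, everything else is a minimal assumption).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of the admin-side flush that produces the FlushTableProcedure (pid=73)
// and its FlushRegionProcedure subtask (pid=74) seen in the log above.
public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Blocks until the master reports the flush procedure as done; if the
            // region is mid-flush, the sub-procedure is simply retried by the master,
            // which is exactly the "Checking to see if procedure is done" polling above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}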
2024-11-22T19:22:55,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:55,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:55,930 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:55,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303435929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:55,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:55,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303435930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:55,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:55,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303435932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:55,997 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:55,998 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-22T19:22:55,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
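[Editor's note] The 512.0 K blocking limit in these rejections is a deliberately tiny test setting chosen to force back-pressure; on a stock cluster the blocking point is the per-region flush size multiplied by a block multiplier. A hedged configuration sketch follows; the values shown are the usual defaults, used here only as examples.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// The "Over memstore limit" rejections above fire once a region's memstore
// exceeds roughly flush-size * block-multiplier. The test runs with a ~512 K
// limit to trigger the condition quickly; defaults are far larger.
public class MemstoreLimitConfig {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024); // default 128 MB
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);             // default 4
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("writes blocked above ~" + blockingLimit + " bytes per region");
    }
}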
2024-11-22T19:22:55,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:22:55,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:55,998 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:55,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:55,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:56,020 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/8284d7cd94a24778ad876f7ff1cccfd6 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/8284d7cd94a24778ad876f7ff1cccfd6 2024-11-22T19:22:56,025 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/A of 583533db2ec7fa9b81dbb4dd334629b0 into 8284d7cd94a24778ad876f7ff1cccfd6(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
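[Editor's note] The "Committing .../.tmp/... as ..." lines show the flush and compaction outputs being written under the region's .tmp directory and only then moved into the column-family directory, so readers never observe partially written HFiles. Below is a simplified sketch of that write-then-rename commit step using the Hadoop FileSystem API; the paths are placeholders and this is not the HRegionFileSystem implementation itself.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Simplified version of the commit step in the log: the new HFile is staged
// under .tmp and renamed into the store directory once complete, so concurrent
// readers only ever see whole files.
public class CommitTmpFileSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/data/default/TestAcidGuarantees/<region>/.tmp/A/<hfile>"); // placeholder paths
        Path dst = new Path("/data/default/TestAcidGuarantees/<region>/A/<hfile>");
        if (!fs.rename(tmp, dst)) {                 // single rename within the same filesystem
            throw new IOException("failed to commit " + tmp + " to " + dst);
        }
    }
}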
2024-11-22T19:22:56,025 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:22:56,025 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/A, priority=13, startTime=1732303375533; duration=0sec 2024-11-22T19:22:56,025 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:56,025 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:A 2024-11-22T19:22:56,107 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/7ba98a3864914cab8cfee1d6e2402b76 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/7ba98a3864914cab8cfee1d6e2402b76 2024-11-22T19:22:56,110 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/91d1d9a9867642e4b6fa1f661cdf9bb5 2024-11-22T19:22:56,114 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/C of 583533db2ec7fa9b81dbb4dd334629b0 into 7ba98a3864914cab8cfee1d6e2402b76(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:56,114 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:22:56,114 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/C, priority=13, startTime=1732303375533; duration=0sec 2024-11-22T19:22:56,114 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:56,114 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:C 2024-11-22T19:22:56,119 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/2ff15428645c4558a2c7a7d3d80f9aaa is 50, key is test_row_0/C:col10/1732303375597/Put/seqid=0 2024-11-22T19:22:56,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742133_1309 (size=12001) 2024-11-22T19:22:56,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-22T19:22:56,150 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:56,150 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-22T19:22:56,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:56,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:22:56,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:56,151 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:56,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:56,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:56,233 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:56,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303436232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:56,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:56,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303436234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:56,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:56,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303436235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:56,303 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:56,304 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-22T19:22:56,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:56,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:22:56,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:56,304 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:56,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:56,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:56,456 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:56,457 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-22T19:22:56,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:56,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:22:56,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:56,457 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:56,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:56,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:56,529 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/2ff15428645c4558a2c7a7d3d80f9aaa 2024-11-22T19:22:56,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/d6a937864e2c47acbae6dfb1f4c22134 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/d6a937864e2c47acbae6dfb1f4c22134 2024-11-22T19:22:56,538 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/d6a937864e2c47acbae6dfb1f4c22134, entries=200, sequenceid=74, filesize=14.0 K 2024-11-22T19:22:56,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/91d1d9a9867642e4b6fa1f661cdf9bb5 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/91d1d9a9867642e4b6fa1f661cdf9bb5 2024-11-22T19:22:56,545 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/91d1d9a9867642e4b6fa1f661cdf9bb5, entries=150, sequenceid=74, filesize=11.7 K 2024-11-22T19:22:56,547 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/2ff15428645c4558a2c7a7d3d80f9aaa as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/2ff15428645c4558a2c7a7d3d80f9aaa 2024-11-22T19:22:56,552 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/2ff15428645c4558a2c7a7d3d80f9aaa, entries=150, sequenceid=74, filesize=11.7 K 2024-11-22T19:22:56,556 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 583533db2ec7fa9b81dbb4dd334629b0 in 957ms, sequenceid=74, compaction requested=false 2024-11-22T19:22:56,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:22:56,609 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:56,610 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=74 2024-11-22T19:22:56,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:56,610 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-22T19:22:56,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:22:56,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:56,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:22:56,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:56,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:22:56,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:56,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/9996d997f48947daafa8cb0a6c20ee78 is 50, key is test_row_0/A:col10/1732303375618/Put/seqid=0 2024-11-22T19:22:56,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742134_1310 (size=12001) 2024-11-22T19:22:56,632 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/9996d997f48947daafa8cb0a6c20ee78 2024-11-22T19:22:56,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-22T19:22:56,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/a062cf04bff14a54beffa6fa7f92fb45 is 50, key is test_row_0/B:col10/1732303375618/Put/seqid=0 2024-11-22T19:22:56,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to 
blk_1073742135_1311 (size=12001) 2024-11-22T19:22:56,679 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/a062cf04bff14a54beffa6fa7f92fb45 2024-11-22T19:22:56,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/0a8a49422e604f69aa9f183bc911e4c0 is 50, key is test_row_0/C:col10/1732303375618/Put/seqid=0 2024-11-22T19:22:56,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742136_1312 (size=12001) 2024-11-22T19:22:56,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:22:56,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:22:56,768 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:56,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303436765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:56,769 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:56,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303436767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:56,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:56,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303436768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:56,871 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:56,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303436870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:56,871 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:56,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303436870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:56,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:56,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303436872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:57,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:57,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303437072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:57,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:57,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303437073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:57,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:57,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303437075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:57,126 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/0a8a49422e604f69aa9f183bc911e4c0 2024-11-22T19:22:57,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/9996d997f48947daafa8cb0a6c20ee78 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/9996d997f48947daafa8cb0a6c20ee78 2024-11-22T19:22:57,136 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/9996d997f48947daafa8cb0a6c20ee78, entries=150, sequenceid=89, filesize=11.7 K 2024-11-22T19:22:57,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/a062cf04bff14a54beffa6fa7f92fb45 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a062cf04bff14a54beffa6fa7f92fb45 2024-11-22T19:22:57,142 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-22T19:22:57,146 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a062cf04bff14a54beffa6fa7f92fb45, entries=150, sequenceid=89, filesize=11.7 K 2024-11-22T19:22:57,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/0a8a49422e604f69aa9f183bc911e4c0 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/0a8a49422e604f69aa9f183bc911e4c0 2024-11-22T19:22:57,154 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/0a8a49422e604f69aa9f183bc911e4c0, entries=150, sequenceid=89, filesize=11.7 K 2024-11-22T19:22:57,155 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 583533db2ec7fa9b81dbb4dd334629b0 in 545ms, sequenceid=89, compaction requested=true 2024-11-22T19:22:57,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:22:57,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:57,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-11-22T19:22:57,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-11-22T19:22:57,159 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-22T19:22:57,159 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6190 sec 2024-11-22T19:22:57,162 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.6260 sec 2024-11-22T19:22:57,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:22:57,378 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T19:22:57,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:22:57,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:57,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:22:57,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:57,380 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:22:57,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:57,401 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/3445bdf1db0f42e88bbe13877459e4b0 is 50, key is test_row_0/A:col10/1732303376762/Put/seqid=0 2024-11-22T19:22:57,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:57,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303437402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:57,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:57,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303437402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:57,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:57,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303437404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:57,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742137_1313 (size=14341) 2024-11-22T19:22:57,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:57,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303437506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:57,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:57,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303437506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:57,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:57,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303437508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:57,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-22T19:22:57,643 INFO [Thread-1335 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-22T19:22:57,644 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:22:57,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-22T19:22:57,646 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:57,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55852 deadline: 1732303437644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:57,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-22T19:22:57,646 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:22:57,647 DEBUG [Thread-1327 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4198 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., hostname=a307a1377457,35917,1732303314657, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:22:57,647 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 
execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:22:57,647 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:22:57,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:57,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1732303437662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:57,664 DEBUG [Thread-1333 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4214 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., hostname=a307a1377457,35917,1732303314657, seqNum=2, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:22:57,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:57,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303437709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:57,711 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:57,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303437709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:57,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:57,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303437711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:57,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-22T19:22:57,798 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:57,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-22T19:22:57,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:57,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:22:57,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:57,799 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:57,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:57,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:57,822 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/3445bdf1db0f42e88bbe13877459e4b0 2024-11-22T19:22:57,830 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/22a7bc37de3540c09b8063c08ff16759 is 50, key is test_row_0/B:col10/1732303376762/Put/seqid=0 2024-11-22T19:22:57,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742138_1314 (size=12001) 2024-11-22T19:22:57,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-22T19:22:57,952 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:57,952 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-22T19:22:57,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:57,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:22:57,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:57,953 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:57,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:57,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:58,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:58,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303438013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:58,015 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:58,015 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:58,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303438013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:58,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303438014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:58,104 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:58,105 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-22T19:22:58,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
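The RegionTooBusyException entries above are thrown by HRegion.checkResources when a put arrives while the region's memstore is over its blocking limit (512.0 K here; in HBase that limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier). The stock client already retries this exception internally, so the sketch below only illustrates handling it explicitly around Table.put, assuming the standard HBase Java client API; the row key, column, and retry constants are illustrative, not taken from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
        private static final int MAX_ATTEMPTS = 5;        // illustrative
        private static final long BASE_BACKOFF_MS = 200L; // illustrative

        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                for (int attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
                    try {
                        table.put(put); // writes blocked by the memstore limit surface as RegionTooBusyException
                        break;
                    } catch (RegionTooBusyException e) {
                        // Back off and let MemStoreFlusher drain the region before retrying.
                        Thread.sleep(BASE_BACKOFF_MS * attempt);
                    }
                }
            }
        }
    }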
2024-11-22T19:22:58,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:22:58,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:58,105 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:58,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:58,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
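The repeating pid=76 cycle above is the master re-dispatching its flush procedure to the region server, which keeps answering "NOT flushing ... as already flushing" until the MemStoreFlusher-driven flush finishes; the IOException and the master's "Remote procedure failed" entries are how that busy state is reported back. From a client, a table flush is requested through the Admin API; recent HBase releases route this through a master-side procedure, though the exact path is version-dependent. A minimal sketch, assuming the standard Java client (connection setup as in the previous sketch):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public class FlushRequestSketch {
        // Ask the cluster to flush every region of the table; safe to call while a flush is already in flight.
        public static void requestFlush(Connection conn) throws IOException {
            try (Admin admin = conn.getAdmin()) {
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }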
2024-11-22T19:22:58,240 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/22a7bc37de3540c09b8063c08ff16759 2024-11-22T19:22:58,247 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/25730cfbb3a04303ac46dda39915e44a is 50, key is test_row_0/C:col10/1732303376762/Put/seqid=0 2024-11-22T19:22:58,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-22T19:22:58,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742139_1315 (size=12001) 2024-11-22T19:22:58,257 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:58,258 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-22T19:22:58,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:58,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:22:58,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:58,258 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:58,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:58,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:58,410 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:58,411 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-22T19:22:58,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:58,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:22:58,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:58,411 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:58,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:58,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:58,519 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:58,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303438517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:58,521 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:58,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303438519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:58,521 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:58,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303438520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:58,564 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:58,564 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-22T19:22:58,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:58,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:22:58,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:58,564 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:58,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:58,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:58,652 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/25730cfbb3a04303ac46dda39915e44a 2024-11-22T19:22:58,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/3445bdf1db0f42e88bbe13877459e4b0 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/3445bdf1db0f42e88bbe13877459e4b0 2024-11-22T19:22:58,660 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/3445bdf1db0f42e88bbe13877459e4b0, entries=200, sequenceid=115, filesize=14.0 K 2024-11-22T19:22:58,661 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/22a7bc37de3540c09b8063c08ff16759 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/22a7bc37de3540c09b8063c08ff16759 2024-11-22T19:22:58,665 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/22a7bc37de3540c09b8063c08ff16759, entries=150, sequenceid=115, filesize=11.7 K 2024-11-22T19:22:58,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/25730cfbb3a04303ac46dda39915e44a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/25730cfbb3a04303ac46dda39915e44a 2024-11-22T19:22:58,670 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/25730cfbb3a04303ac46dda39915e44a, entries=150, sequenceid=115, filesize=11.7 K 2024-11-22T19:22:58,671 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 583533db2ec7fa9b81dbb4dd334629b0 in 1293ms, sequenceid=115, compaction requested=true 2024-11-22T19:22:58,671 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:22:58,672 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:22:58,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:22:58,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:58,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:22:58,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:58,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:22:58,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:58,672 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:22:58,673 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52787 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:22:58,673 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/A is initiating minor compaction (all files) 2024-11-22T19:22:58,673 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/A in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
2024-11-22T19:22:58,673 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:22:58,673 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/8284d7cd94a24778ad876f7ff1cccfd6, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/d6a937864e2c47acbae6dfb1f4c22134, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/9996d997f48947daafa8cb0a6c20ee78, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/3445bdf1db0f42e88bbe13877459e4b0] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=51.5 K 2024-11-22T19:22:58,673 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/B is initiating minor compaction (all files) 2024-11-22T19:22:58,673 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/B in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:58,673 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/563fcfc545564256a9a6cf57aff71f63, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/91d1d9a9867642e4b6fa1f661cdf9bb5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a062cf04bff14a54beffa6fa7f92fb45, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/22a7bc37de3540c09b8063c08ff16759] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=47.0 K 2024-11-22T19:22:58,674 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8284d7cd94a24778ad876f7ff1cccfd6, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732303374784 2024-11-22T19:22:58,674 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 563fcfc545564256a9a6cf57aff71f63, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732303374784 2024-11-22T19:22:58,674 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d6a937864e2c47acbae6dfb1f4c22134, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=74, 
earliestPutTs=1732303374986 2024-11-22T19:22:58,674 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 91d1d9a9867642e4b6fa1f661cdf9bb5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1732303374993 2024-11-22T19:22:58,674 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9996d997f48947daafa8cb0a6c20ee78, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732303375609 2024-11-22T19:22:58,674 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3445bdf1db0f42e88bbe13877459e4b0, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732303376762 2024-11-22T19:22:58,675 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting a062cf04bff14a54beffa6fa7f92fb45, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732303375609 2024-11-22T19:22:58,675 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 22a7bc37de3540c09b8063c08ff16759, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732303376762 2024-11-22T19:22:58,685 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#A#compaction#261 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:58,685 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#B#compaction#262 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:58,685 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/89d5151c0bc44e5190615458b9dc8e49 is 50, key is test_row_0/A:col10/1732303376762/Put/seqid=0 2024-11-22T19:22:58,686 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/0ec8cdbfb9d44a89ab94692056cf4123 is 50, key is test_row_0/B:col10/1732303376762/Put/seqid=0 2024-11-22T19:22:58,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742140_1316 (size=12241) 2024-11-22T19:22:58,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742141_1317 (size=12241) 2024-11-22T19:22:58,711 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/0ec8cdbfb9d44a89ab94692056cf4123 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/0ec8cdbfb9d44a89ab94692056cf4123 2024-11-22T19:22:58,715 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/B of 583533db2ec7fa9b81dbb4dd334629b0 into 0ec8cdbfb9d44a89ab94692056cf4123(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
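The compaction entries above show the selection side of this cycle: after the flush each store holds four HFiles, SortedCompactionPolicy reports "4 eligible, 16 blocking", and ExploringCompactionPolicy selects all four for a minor compaction. The thresholds behind those numbers are ordinary configuration; a minimal sketch of setting them programmatically, assuming the standard HBase configuration keys (the values shown are the usual defaults, for illustration rather than taken from this test's setup):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
        public static Configuration tunedConf() {
            Configuration conf = HBaseConfiguration.create();
            // Minimum and maximum number of store files considered for one minor compaction.
            conf.setInt("hbase.hstore.compaction.min", 3);
            conf.setInt("hbase.hstore.compaction.max", 10);
            // Store file count at which writes to the region are blocked (the "16 blocking" above).
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);
            return conf;
        }
    }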
2024-11-22T19:22:58,716 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:22:58,716 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/B, priority=12, startTime=1732303378672; duration=0sec 2024-11-22T19:22:58,716 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:22:58,716 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:B 2024-11-22T19:22:58,716 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:22:58,716 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:58,717 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-22T19:22:58,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:58,717 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-22T19:22:58,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:22:58,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:58,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:22:58,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:58,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:22:58,718 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:22:58,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:58,718 DEBUG 
[RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/C is initiating minor compaction (all files) 2024-11-22T19:22:58,718 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/C in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:58,718 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/7ba98a3864914cab8cfee1d6e2402b76, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/2ff15428645c4558a2c7a7d3d80f9aaa, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/0a8a49422e604f69aa9f183bc911e4c0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/25730cfbb3a04303ac46dda39915e44a] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=47.0 K 2024-11-22T19:22:58,718 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ba98a3864914cab8cfee1d6e2402b76, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732303374784 2024-11-22T19:22:58,719 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ff15428645c4558a2c7a7d3d80f9aaa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1732303374993 2024-11-22T19:22:58,719 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a8a49422e604f69aa9f183bc911e4c0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732303375609 2024-11-22T19:22:58,721 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 25730cfbb3a04303ac46dda39915e44a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732303376762 2024-11-22T19:22:58,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/2e4c79dbdb254747a36e7c8d2a7529ae is 50, key is test_row_0/A:col10/1732303377383/Put/seqid=0 2024-11-22T19:22:58,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742142_1318 (size=12001) 2024-11-22T19:22:58,735 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/2e4c79dbdb254747a36e7c8d2a7529ae 2024-11-22T19:22:58,742 INFO 
[RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#C#compaction#264 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:22:58,742 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/c4a9482162c0461eaaabe5ac5f8db61a is 50, key is test_row_0/C:col10/1732303376762/Put/seqid=0 2024-11-22T19:22:58,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-22T19:22:58,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/e040aa3e9733428686bc30550c98af92 is 50, key is test_row_0/B:col10/1732303377383/Put/seqid=0 2024-11-22T19:22:58,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742143_1319 (size=12241) 2024-11-22T19:22:58,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742144_1320 (size=12001) 2024-11-22T19:22:59,109 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/89d5151c0bc44e5190615458b9dc8e49 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/89d5151c0bc44e5190615458b9dc8e49 2024-11-22T19:22:59,115 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/A of 583533db2ec7fa9b81dbb4dd334629b0 into 89d5151c0bc44e5190615458b9dc8e49(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:59,115 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:22:59,115 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/A, priority=12, startTime=1732303378671; duration=0sec 2024-11-22T19:22:59,115 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:59,115 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:A 2024-11-22T19:22:59,134 DEBUG [master/a307a1377457:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 45aa664165800f3151e26f1a3610c687 changed from -1.0 to 0.0, refreshing cache 2024-11-22T19:22:59,187 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/e040aa3e9733428686bc30550c98af92 2024-11-22T19:22:59,187 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/c4a9482162c0461eaaabe5ac5f8db61a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/c4a9482162c0461eaaabe5ac5f8db61a 2024-11-22T19:22:59,198 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/C of 583533db2ec7fa9b81dbb4dd334629b0 into c4a9482162c0461eaaabe5ac5f8db61a(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:22:59,198 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:22:59,198 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/C, priority=12, startTime=1732303378672; duration=0sec 2024-11-22T19:22:59,198 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:22:59,198 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:C 2024-11-22T19:22:59,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/6092c449a4384247ac7ea8c9f3e6bba3 is 50, key is test_row_0/C:col10/1732303377383/Put/seqid=0 2024-11-22T19:22:59,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742145_1321 (size=12001) 2024-11-22T19:22:59,225 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/6092c449a4384247ac7ea8c9f3e6bba3 2024-11-22T19:22:59,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/2e4c79dbdb254747a36e7c8d2a7529ae as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/2e4c79dbdb254747a36e7c8d2a7529ae 2024-11-22T19:22:59,245 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/2e4c79dbdb254747a36e7c8d2a7529ae, entries=150, sequenceid=126, filesize=11.7 K 2024-11-22T19:22:59,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/e040aa3e9733428686bc30550c98af92 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e040aa3e9733428686bc30550c98af92 2024-11-22T19:22:59,261 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e040aa3e9733428686bc30550c98af92, entries=150, sequenceid=126, filesize=11.7 K 2024-11-22T19:22:59,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/6092c449a4384247ac7ea8c9f3e6bba3 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/6092c449a4384247ac7ea8c9f3e6bba3 2024-11-22T19:22:59,270 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/6092c449a4384247ac7ea8c9f3e6bba3, entries=150, sequenceid=126, filesize=11.7 K 2024-11-22T19:22:59,273 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=0 B/0 for 583533db2ec7fa9b81dbb4dd334629b0 in 556ms, sequenceid=126, compaction requested=false 2024-11-22T19:22:59,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:22:59,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
2024-11-22T19:22:59,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-22T19:22:59,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-22T19:22:59,277 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-22T19:22:59,277 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6280 sec 2024-11-22T19:22:59,278 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.6330 sec 2024-11-22T19:22:59,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:22:59,543 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T19:22:59,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:22:59,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:59,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:22:59,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:59,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:22:59,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:22:59,549 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/aaa1bbd6bb404d3fb7824d5cc2ed7c77 is 50, key is test_row_0/A:col10/1732303379542/Put/seqid=0 2024-11-22T19:22:59,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742146_1322 (size=12151) 2024-11-22T19:22:59,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:59,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303439637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:59,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:59,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303439638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:59,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:59,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303439639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:59,745 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:59,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303439743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:59,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:59,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303439744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:59,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:59,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303439744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:59,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-22T19:22:59,751 INFO [Thread-1335 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-22T19:22:59,753 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:22:59,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-22T19:22:59,754 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:22:59,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-22T19:22:59,755 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:22:59,755 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:22:59,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-22T19:22:59,907 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:22:59,907 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-22T19:22:59,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
2024-11-22T19:22:59,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:22:59,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:22:59,908 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:59,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:22:59,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:22:59,950 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303439948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:59,951 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:59,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303439948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:59,951 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:22:59,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303439948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:22:59,967 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=139 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/aaa1bbd6bb404d3fb7824d5cc2ed7c77 2024-11-22T19:22:59,994 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/2c51ee0321bd4e648ed9d6c77257d01f is 50, key is test_row_0/B:col10/1732303379542/Put/seqid=0 2024-11-22T19:23:00,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742147_1323 (size=12151) 2024-11-22T19:23:00,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-22T19:23:00,060 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:00,061 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-22T19:23:00,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:00,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:00,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
2024-11-22T19:23:00,061 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:00,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:00,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:00,214 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:00,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-22T19:23:00,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:00,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:00,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:00,215 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:00,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:00,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:00,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:00,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303440252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:00,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:00,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303440252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:00,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:00,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303440253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:00,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-22T19:23:00,367 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:00,367 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-22T19:23:00,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:00,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:00,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:00,368 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:00,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:00,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:00,439 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=139 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/2c51ee0321bd4e648ed9d6c77257d01f 2024-11-22T19:23:00,460 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/5cfc75fa999f4c5092de287412eb1dcd is 50, key is test_row_0/C:col10/1732303379542/Put/seqid=0 2024-11-22T19:23:00,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742148_1324 (size=12151) 2024-11-22T19:23:00,521 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:00,521 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-22T19:23:00,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:00,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:00,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:00,521 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:00,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:00,522 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=139 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/5cfc75fa999f4c5092de287412eb1dcd 2024-11-22T19:23:00,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:00,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/aaa1bbd6bb404d3fb7824d5cc2ed7c77 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/aaa1bbd6bb404d3fb7824d5cc2ed7c77 2024-11-22T19:23:00,532 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/aaa1bbd6bb404d3fb7824d5cc2ed7c77, entries=150, sequenceid=139, filesize=11.9 K 2024-11-22T19:23:00,533 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/2c51ee0321bd4e648ed9d6c77257d01f as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/2c51ee0321bd4e648ed9d6c77257d01f 2024-11-22T19:23:00,538 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/2c51ee0321bd4e648ed9d6c77257d01f, entries=150, sequenceid=139, filesize=11.9 K 2024-11-22T19:23:00,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/5cfc75fa999f4c5092de287412eb1dcd as 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/5cfc75fa999f4c5092de287412eb1dcd 2024-11-22T19:23:00,544 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/5cfc75fa999f4c5092de287412eb1dcd, entries=150, sequenceid=139, filesize=11.9 K 2024-11-22T19:23:00,546 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 583533db2ec7fa9b81dbb4dd334629b0 in 1003ms, sequenceid=139, compaction requested=true 2024-11-22T19:23:00,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:00,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:23:00,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:00,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:23:00,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:00,546 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:00,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:23:00,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T19:23:00,546 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:00,547 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:00,548 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/A is initiating minor compaction (all files) 2024-11-22T19:23:00,548 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/A in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
2024-11-22T19:23:00,548 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/89d5151c0bc44e5190615458b9dc8e49, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/2e4c79dbdb254747a36e7c8d2a7529ae, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/aaa1bbd6bb404d3fb7824d5cc2ed7c77] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=35.5 K 2024-11-22T19:23:00,548 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:00,548 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/B is initiating minor compaction (all files) 2024-11-22T19:23:00,548 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/B in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:00,548 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/0ec8cdbfb9d44a89ab94692056cf4123, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e040aa3e9733428686bc30550c98af92, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/2c51ee0321bd4e648ed9d6c77257d01f] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=35.5 K 2024-11-22T19:23:00,549 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 89d5151c0bc44e5190615458b9dc8e49, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732303376762 2024-11-22T19:23:00,549 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e4c79dbdb254747a36e7c8d2a7529ae, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1732303377383 2024-11-22T19:23:00,549 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ec8cdbfb9d44a89ab94692056cf4123, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732303376762 2024-11-22T19:23:00,550 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e040aa3e9733428686bc30550c98af92, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1732303377383 2024-11-22T19:23:00,550 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] 
compactions.Compactor(224): Compacting aaa1bbd6bb404d3fb7824d5cc2ed7c77, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732303379535 2024-11-22T19:23:00,551 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2c51ee0321bd4e648ed9d6c77257d01f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732303379535 2024-11-22T19:23:00,570 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#B#compaction#270 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:00,571 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/2e14ffc5e4204191bc45542fdee093cb is 50, key is test_row_0/B:col10/1732303379542/Put/seqid=0 2024-11-22T19:23:00,582 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#A#compaction#271 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:00,583 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/41ab8b729f38403ab08c3fbfa6665c45 is 50, key is test_row_0/A:col10/1732303379542/Put/seqid=0 2024-11-22T19:23:00,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742149_1325 (size=12493) 2024-11-22T19:23:00,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742150_1326 (size=12493) 2024-11-22T19:23:00,674 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:00,674 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-22T19:23:00,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
2024-11-22T19:23:00,675 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T19:23:00,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:00,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:00,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:00,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:00,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:00,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:00,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/f9f71a3ea4e5433c9b31c5792bc94136 is 50, key is test_row_0/A:col10/1732303379629/Put/seqid=0 2024-11-22T19:23:00,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742151_1327 (size=12151) 2024-11-22T19:23:00,715 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/f9f71a3ea4e5433c9b31c5792bc94136 2024-11-22T19:23:00,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/f7ff9de4ae9a4ea4848cf2192d633bfe is 50, key is test_row_0/B:col10/1732303379629/Put/seqid=0 2024-11-22T19:23:00,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:00,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
as already flushing 2024-11-22T19:23:00,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742152_1328 (size=12151) 2024-11-22T19:23:00,761 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/f7ff9de4ae9a4ea4848cf2192d633bfe 2024-11-22T19:23:00,770 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:00,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303440766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:00,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/9527662c094e4471a06ab81edb1da9d4 is 50, key is test_row_0/C:col10/1732303379629/Put/seqid=0 2024-11-22T19:23:00,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:00,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303440769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:00,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:00,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303440770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:00,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742153_1329 (size=12151) 2024-11-22T19:23:00,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-22T19:23:00,873 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:00,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303440872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:00,874 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:00,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303440873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:00,874 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:00,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303440873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:01,050 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/41ab8b729f38403ab08c3fbfa6665c45 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/41ab8b729f38403ab08c3fbfa6665c45 2024-11-22T19:23:01,057 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/2e14ffc5e4204191bc45542fdee093cb as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/2e14ffc5e4204191bc45542fdee093cb 2024-11-22T19:23:01,057 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/A of 583533db2ec7fa9b81dbb4dd334629b0 into 41ab8b729f38403ab08c3fbfa6665c45(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:23:01,058 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:01,058 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/A, priority=13, startTime=1732303380546; duration=0sec 2024-11-22T19:23:01,058 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:01,058 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:A 2024-11-22T19:23:01,058 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:01,061 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:01,061 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/C is initiating minor compaction (all files) 2024-11-22T19:23:01,061 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/C in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:01,061 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/c4a9482162c0461eaaabe5ac5f8db61a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/6092c449a4384247ac7ea8c9f3e6bba3, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/5cfc75fa999f4c5092de287412eb1dcd] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=35.5 K 2024-11-22T19:23:01,062 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting c4a9482162c0461eaaabe5ac5f8db61a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732303376762 2024-11-22T19:23:01,063 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 6092c449a4384247ac7ea8c9f3e6bba3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1732303377383 2024-11-22T19:23:01,063 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 5cfc75fa999f4c5092de287412eb1dcd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732303379535 2024-11-22T19:23:01,066 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/B of 583533db2ec7fa9b81dbb4dd334629b0 into 2e14ffc5e4204191bc45542fdee093cb(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:23:01,066 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:01,066 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/B, priority=13, startTime=1732303380546; duration=0sec 2024-11-22T19:23:01,066 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:01,066 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:B 2024-11-22T19:23:01,074 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#C#compaction#275 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:01,075 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/5f6cd007914c405bbe9b438f1ba27623 is 50, key is test_row_0/C:col10/1732303379542/Put/seqid=0 2024-11-22T19:23:01,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:01,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303441075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:01,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:01,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303441076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:01,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:01,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303441076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:01,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742154_1330 (size=12493) 2024-11-22T19:23:01,209 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/9527662c094e4471a06ab81edb1da9d4 2024-11-22T19:23:01,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/f9f71a3ea4e5433c9b31c5792bc94136 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f9f71a3ea4e5433c9b31c5792bc94136 2024-11-22T19:23:01,225 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f9f71a3ea4e5433c9b31c5792bc94136, entries=150, sequenceid=164, filesize=11.9 K 2024-11-22T19:23:01,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/f7ff9de4ae9a4ea4848cf2192d633bfe as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/f7ff9de4ae9a4ea4848cf2192d633bfe 2024-11-22T19:23:01,232 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/f7ff9de4ae9a4ea4848cf2192d633bfe, entries=150, sequenceid=164, filesize=11.9 K 2024-11-22T19:23:01,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/9527662c094e4471a06ab81edb1da9d4 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/9527662c094e4471a06ab81edb1da9d4 2024-11-22T19:23:01,241 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/9527662c094e4471a06ab81edb1da9d4, entries=150, sequenceid=164, filesize=11.9 K 2024-11-22T19:23:01,243 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 583533db2ec7fa9b81dbb4dd334629b0 in 568ms, sequenceid=164, compaction requested=false 2024-11-22T19:23:01,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:01,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
2024-11-22T19:23:01,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-22T19:23:01,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-22T19:23:01,247 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-22T19:23:01,247 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4900 sec 2024-11-22T19:23:01,249 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 1.4940 sec 2024-11-22T19:23:01,383 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-22T19:23:01,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:01,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:01,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:01,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:01,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:01,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:01,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:01,391 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/c323b2c63fad42fab71c664d62d65d82 is 50, key is test_row_0/A:col10/1732303380768/Put/seqid=0 2024-11-22T19:23:01,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742155_1331 (size=14541) 2024-11-22T19:23:01,422 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/c323b2c63fad42fab71c664d62d65d82 2024-11-22T19:23:01,430 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/089284de71dc4c1a89c938bf1f755e54 is 50, key is test_row_0/B:col10/1732303380768/Put/seqid=0 2024-11-22T19:23:01,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742156_1332 
(size=12151) 2024-11-22T19:23:01,436 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/089284de71dc4c1a89c938bf1f755e54 2024-11-22T19:23:01,445 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/e50601abd3924fc490bddbfb2bae2a75 is 50, key is test_row_0/C:col10/1732303380768/Put/seqid=0 2024-11-22T19:23:01,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742157_1333 (size=12151) 2024-11-22T19:23:01,455 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/e50601abd3924fc490bddbfb2bae2a75 2024-11-22T19:23:01,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/c323b2c63fad42fab71c664d62d65d82 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/c323b2c63fad42fab71c664d62d65d82 2024-11-22T19:23:01,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:01,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303441461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:01,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:01,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303441463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:01,471 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/c323b2c63fad42fab71c664d62d65d82, entries=200, sequenceid=179, filesize=14.2 K 2024-11-22T19:23:01,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/089284de71dc4c1a89c938bf1f755e54 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/089284de71dc4c1a89c938bf1f755e54 2024-11-22T19:23:01,475 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:01,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303441469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:01,477 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/089284de71dc4c1a89c938bf1f755e54, entries=150, sequenceid=179, filesize=11.9 K 2024-11-22T19:23:01,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/e50601abd3924fc490bddbfb2bae2a75 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/e50601abd3924fc490bddbfb2bae2a75 2024-11-22T19:23:01,492 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/e50601abd3924fc490bddbfb2bae2a75, entries=150, sequenceid=179, filesize=11.9 K 2024-11-22T19:23:01,493 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 583533db2ec7fa9b81dbb4dd334629b0 in 110ms, sequenceid=179, compaction requested=true 2024-11-22T19:23:01,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:01,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:A, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:23:01,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:01,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:B, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:23:01,494 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:01,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), 
splitQueue=0 2024-11-22T19:23:01,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:23:01,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T19:23:01,495 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39185 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:01,495 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/A is initiating minor compaction (all files) 2024-11-22T19:23:01,495 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/A in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:01,495 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/41ab8b729f38403ab08c3fbfa6665c45, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f9f71a3ea4e5433c9b31c5792bc94136, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/c323b2c63fad42fab71c664d62d65d82] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=38.3 K 2024-11-22T19:23:01,496 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 41ab8b729f38403ab08c3fbfa6665c45, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732303379535 2024-11-22T19:23:01,496 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f9f71a3ea4e5433c9b31c5792bc94136, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732303379629 2024-11-22T19:23:01,496 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting c323b2c63fad42fab71c664d62d65d82, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1732303380768 2024-11-22T19:23:01,510 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#A#compaction#279 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:01,511 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/c986815811ec4fae9e33f99971544f2e is 50, key is test_row_0/A:col10/1732303380768/Put/seqid=0 2024-11-22T19:23:01,523 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/5f6cd007914c405bbe9b438f1ba27623 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/5f6cd007914c405bbe9b438f1ba27623 2024-11-22T19:23:01,529 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/C of 583533db2ec7fa9b81dbb4dd334629b0 into 5f6cd007914c405bbe9b438f1ba27623(size=12.2 K), total size for store is 35.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:23:01,529 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:01,529 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/C, priority=13, startTime=1732303380546; duration=0sec 2024-11-22T19:23:01,529 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T19:23:01,529 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:C 2024-11-22T19:23:01,529 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:C 2024-11-22T19:23:01,529 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:01,530 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:01,530 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/B is initiating minor compaction (all files) 2024-11-22T19:23:01,531 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/B in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
2024-11-22T19:23:01,531 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/2e14ffc5e4204191bc45542fdee093cb, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/f7ff9de4ae9a4ea4848cf2192d633bfe, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/089284de71dc4c1a89c938bf1f755e54] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=35.9 K 2024-11-22T19:23:01,531 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e14ffc5e4204191bc45542fdee093cb, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732303379535 2024-11-22T19:23:01,531 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting f7ff9de4ae9a4ea4848cf2192d633bfe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732303379629 2024-11-22T19:23:01,532 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 089284de71dc4c1a89c938bf1f755e54, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1732303380768 2024-11-22T19:23:01,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742158_1334 (size=12595) 2024-11-22T19:23:01,563 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#B#compaction#280 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:01,565 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/a3372c9e099b4cdd9c2038e32285e786 is 50, key is test_row_0/B:col10/1732303380768/Put/seqid=0 2024-11-22T19:23:01,574 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-22T19:23:01,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:01,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742159_1335 (size=12595) 2024-11-22T19:23:01,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:01,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:01,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:01,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:01,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:01,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:01,581 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/f3bdf3d2cc8042c7b2ac8a5a2f7b4487 is 50, key is test_row_0/A:col10/1732303381467/Put/seqid=0 2024-11-22T19:23:01,589 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/a3372c9e099b4cdd9c2038e32285e786 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a3372c9e099b4cdd9c2038e32285e786 2024-11-22T19:23:01,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:01,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303441592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:01,597 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/B of 583533db2ec7fa9b81dbb4dd334629b0 into a3372c9e099b4cdd9c2038e32285e786(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:23:01,597 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:01,597 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/B, priority=13, startTime=1732303381494; duration=0sec 2024-11-22T19:23:01,598 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:01,598 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:B 2024-11-22T19:23:01,598 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:01,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:01,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303441594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:01,599 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:01,599 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/C is initiating minor compaction (all files) 2024-11-22T19:23:01,599 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/C in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:01,599 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/5f6cd007914c405bbe9b438f1ba27623, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/9527662c094e4471a06ab81edb1da9d4, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/e50601abd3924fc490bddbfb2bae2a75] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=35.9 K 2024-11-22T19:23:01,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:01,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303441596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:01,600 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f6cd007914c405bbe9b438f1ba27623, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732303379535 2024-11-22T19:23:01,600 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 9527662c094e4471a06ab81edb1da9d4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732303379629 2024-11-22T19:23:01,601 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting e50601abd3924fc490bddbfb2bae2a75, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1732303380768 2024-11-22T19:23:01,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742160_1336 (size=14541) 2024-11-22T19:23:01,630 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#C#compaction#282 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:01,631 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/dc921f7c342443b9a9cead3b37daa28d is 50, key is test_row_0/C:col10/1732303380768/Put/seqid=0 2024-11-22T19:23:01,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:01,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55852 deadline: 1732303441653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:01,657 DEBUG [Thread-1327 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8208 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., hostname=a307a1377457,35917,1732303314657, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) 
at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:23:01,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742161_1337 (size=12595) 2024-11-22T19:23:01,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:01,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1732303441666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:01,669 DEBUG [Thread-1333 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8219 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., hostname=a307a1377457,35917,1732303314657, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:23:01,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:01,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303441697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:01,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:01,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303441700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:01,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:01,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303441701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:01,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-22T19:23:01,860 INFO [Thread-1335 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-22T19:23:01,862 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:23:01,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-22T19:23:01,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-22T19:23:01,864 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:23:01,864 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:23:01,865 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:23:01,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:01,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303441901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:01,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:01,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303441905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:01,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:01,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303441906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:01,953 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/c986815811ec4fae9e33f99971544f2e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/c986815811ec4fae9e33f99971544f2e 2024-11-22T19:23:01,959 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/A of 583533db2ec7fa9b81dbb4dd334629b0 into c986815811ec4fae9e33f99971544f2e(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:23:01,959 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:01,959 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/A, priority=13, startTime=1732303381494; duration=0sec 2024-11-22T19:23:01,959 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:01,959 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:A 2024-11-22T19:23:01,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-22T19:23:02,016 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:02,017 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-22T19:23:02,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:02,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:02,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:02,017 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:23:02,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:02,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:02,029 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/f3bdf3d2cc8042c7b2ac8a5a2f7b4487 2024-11-22T19:23:02,039 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/a9644dd2e4a94cc6932b6afa8529db77 is 50, key is test_row_0/B:col10/1732303381467/Put/seqid=0 2024-11-22T19:23:02,067 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/dc921f7c342443b9a9cead3b37daa28d as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/dc921f7c342443b9a9cead3b37daa28d 2024-11-22T19:23:02,072 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/C of 583533db2ec7fa9b81dbb4dd334629b0 into dc921f7c342443b9a9cead3b37daa28d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:23:02,072 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:02,072 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/C, priority=13, startTime=1732303381494; duration=0sec 2024-11-22T19:23:02,072 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:02,072 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:C 2024-11-22T19:23:02,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742162_1338 (size=12151) 2024-11-22T19:23:02,076 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/a9644dd2e4a94cc6932b6afa8529db77 2024-11-22T19:23:02,102 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/dbfef666370b4b4482993796c3fc89f0 is 50, key is test_row_0/C:col10/1732303381467/Put/seqid=0 2024-11-22T19:23:02,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742163_1339 (size=12151) 2024-11-22T19:23:02,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-22T19:23:02,171 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:02,171 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-22T19:23:02,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:02,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:02,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
2024-11-22T19:23:02,172 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:02,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:02,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:02,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:02,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303442205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:02,212 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:02,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303442210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:02,212 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:02,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303442211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:02,326 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:02,326 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-22T19:23:02,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:02,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:02,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:02,327 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:02,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:02,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:02,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-22T19:23:02,479 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:02,479 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-22T19:23:02,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:02,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:02,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:02,480 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:23:02,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:02,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:02,548 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/dbfef666370b4b4482993796c3fc89f0 2024-11-22T19:23:02,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/f3bdf3d2cc8042c7b2ac8a5a2f7b4487 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f3bdf3d2cc8042c7b2ac8a5a2f7b4487 2024-11-22T19:23:02,561 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f3bdf3d2cc8042c7b2ac8a5a2f7b4487, entries=200, sequenceid=205, filesize=14.2 K 2024-11-22T19:23:02,562 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/a9644dd2e4a94cc6932b6afa8529db77 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a9644dd2e4a94cc6932b6afa8529db77 2024-11-22T19:23:02,567 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a9644dd2e4a94cc6932b6afa8529db77, entries=150, sequenceid=205, filesize=11.9 K 2024-11-22T19:23:02,569 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/dbfef666370b4b4482993796c3fc89f0 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/dbfef666370b4b4482993796c3fc89f0 2024-11-22T19:23:02,575 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/dbfef666370b4b4482993796c3fc89f0, entries=150, sequenceid=205, filesize=11.9 K 2024-11-22T19:23:02,576 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 583533db2ec7fa9b81dbb4dd334629b0 in 1002ms, sequenceid=205, compaction requested=false 2024-11-22T19:23:02,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:02,633 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
a307a1377457,35917,1732303314657 2024-11-22T19:23:02,634 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-22T19:23:02,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:02,634 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T19:23:02,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:02,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:02,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:02,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:02,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:02,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:02,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/4c8c57a2af7f4b76829be77ce87233b1 is 50, key is test_row_0/A:col10/1732303381587/Put/seqid=0 2024-11-22T19:23:02,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742164_1340 (size=12151) 2024-11-22T19:23:02,691 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/4c8c57a2af7f4b76829be77ce87233b1 2024-11-22T19:23:02,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/c52ccc994aa84c55a6bab16a64eeba7f is 50, key is test_row_0/B:col10/1732303381587/Put/seqid=0 2024-11-22T19:23:02,710 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:02,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:02,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742165_1341 (size=12151) 2024-11-22T19:23:02,749 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/c52ccc994aa84c55a6bab16a64eeba7f 2024-11-22T19:23:02,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:02,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303442748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:02,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:02,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303442750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:02,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:02,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303442751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:02,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/183e5667786a467eaaca07168aefa301 is 50, key is test_row_0/C:col10/1732303381587/Put/seqid=0 2024-11-22T19:23:02,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742166_1342 (size=12151) 2024-11-22T19:23:02,814 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/183e5667786a467eaaca07168aefa301 2024-11-22T19:23:02,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/4c8c57a2af7f4b76829be77ce87233b1 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/4c8c57a2af7f4b76829be77ce87233b1 2024-11-22T19:23:02,828 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/4c8c57a2af7f4b76829be77ce87233b1, entries=150, sequenceid=219, filesize=11.9 K 2024-11-22T19:23:02,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/c52ccc994aa84c55a6bab16a64eeba7f as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/c52ccc994aa84c55a6bab16a64eeba7f 2024-11-22T19:23:02,837 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 
{event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/c52ccc994aa84c55a6bab16a64eeba7f, entries=150, sequenceid=219, filesize=11.9 K 2024-11-22T19:23:02,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/183e5667786a467eaaca07168aefa301 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/183e5667786a467eaaca07168aefa301 2024-11-22T19:23:02,843 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/183e5667786a467eaaca07168aefa301, entries=150, sequenceid=219, filesize=11.9 K 2024-11-22T19:23:02,844 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 583533db2ec7fa9b81dbb4dd334629b0 in 210ms, sequenceid=219, compaction requested=true 2024-11-22T19:23:02,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:02,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
2024-11-22T19:23:02,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-22T19:23:02,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-22T19:23:02,849 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-22T19:23:02,849 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 982 msec 2024-11-22T19:23:02,851 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 987 msec 2024-11-22T19:23:02,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:02,859 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-22T19:23:02,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:02,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:02,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:02,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:02,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:02,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:02,873 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/f0f5bc87d69a4eb696bea74fec596085 is 50, key is test_row_0/A:col10/1732303382858/Put/seqid=0 2024-11-22T19:23:02,875 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:02,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303442872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:02,876 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:02,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303442872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:02,876 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:02,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303442874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:02,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742167_1343 (size=12151) 2024-11-22T19:23:02,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-22T19:23:02,968 INFO [Thread-1335 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-22T19:23:02,969 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:23:02,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-22T19:23:02,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-22T19:23:02,971 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:23:02,972 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:23:02,973 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:23:02,980 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:02,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303442977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:02,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:02,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303442977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:02,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:02,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303442978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:03,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-22T19:23:03,125 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:03,125 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-22T19:23:03,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:03,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:03,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:03,126 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:23:03,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:03,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:03,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:03,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303443182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:03,185 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:03,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303443183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:03,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:03,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303443183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:03,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-22T19:23:03,278 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:03,279 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-22T19:23:03,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:03,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:03,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:03,279 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:23:03,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:03,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:03,316 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/f0f5bc87d69a4eb696bea74fec596085 2024-11-22T19:23:03,357 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/dba3c26b754e40f6b6e095903dd90492 is 50, key is test_row_0/B:col10/1732303382858/Put/seqid=0 2024-11-22T19:23:03,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742168_1344 (size=12151) 2024-11-22T19:23:03,381 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/dba3c26b754e40f6b6e095903dd90492 2024-11-22T19:23:03,406 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/9e86f062cab449d699bdc47add859f3c is 50, key is test_row_0/C:col10/1732303382858/Put/seqid=0 2024-11-22T19:23:03,431 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:03,432 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-22T19:23:03,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:03,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:03,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:03,433 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:03,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:03,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:03,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742169_1345 (size=12151) 2024-11-22T19:23:03,443 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/9e86f062cab449d699bdc47add859f3c 2024-11-22T19:23:03,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/f0f5bc87d69a4eb696bea74fec596085 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f0f5bc87d69a4eb696bea74fec596085 2024-11-22T19:23:03,458 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f0f5bc87d69a4eb696bea74fec596085, entries=150, sequenceid=247, filesize=11.9 K 2024-11-22T19:23:03,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/dba3c26b754e40f6b6e095903dd90492 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/dba3c26b754e40f6b6e095903dd90492 2024-11-22T19:23:03,464 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/dba3c26b754e40f6b6e095903dd90492, entries=150, sequenceid=247, filesize=11.9 K 2024-11-22T19:23:03,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/9e86f062cab449d699bdc47add859f3c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/9e86f062cab449d699bdc47add859f3c 2024-11-22T19:23:03,469 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/9e86f062cab449d699bdc47add859f3c, entries=150, sequenceid=247, filesize=11.9 K 2024-11-22T19:23:03,471 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-22T19:23:03,472 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=46.96 KB/48090 for 583533db2ec7fa9b81dbb4dd334629b0 in 613ms, sequenceid=247, compaction requested=true 2024-11-22T19:23:03,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:03,473 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:23:03,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:23:03,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:03,474 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:23:03,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:23:03,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:03,475 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51438 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:23:03,475 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/A is initiating minor compaction (all files) 2024-11-22T19:23:03,475 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/A in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
2024-11-22T19:23:03,475 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/c986815811ec4fae9e33f99971544f2e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f3bdf3d2cc8042c7b2ac8a5a2f7b4487, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/4c8c57a2af7f4b76829be77ce87233b1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f0f5bc87d69a4eb696bea74fec596085] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=50.2 K 2024-11-22T19:23:03,476 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting c986815811ec4fae9e33f99971544f2e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1732303380768 2024-11-22T19:23:03,476 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3bdf3d2cc8042c7b2ac8a5a2f7b4487, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732303381461 2024-11-22T19:23:03,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:23:03,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:03,477 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c8c57a2af7f4b76829be77ce87233b1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732303381587 2024-11-22T19:23:03,477 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:23:03,478 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/B is initiating minor compaction (all files) 2024-11-22T19:23:03,478 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/B in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
2024-11-22T19:23:03,478 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a3372c9e099b4cdd9c2038e32285e786, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a9644dd2e4a94cc6932b6afa8529db77, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/c52ccc994aa84c55a6bab16a64eeba7f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/dba3c26b754e40f6b6e095903dd90492] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=47.9 K 2024-11-22T19:23:03,478 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting a3372c9e099b4cdd9c2038e32285e786, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1732303380768 2024-11-22T19:23:03,478 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f0f5bc87d69a4eb696bea74fec596085, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732303382746 2024-11-22T19:23:03,479 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting a9644dd2e4a94cc6932b6afa8529db77, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732303381467 2024-11-22T19:23:03,481 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting c52ccc994aa84c55a6bab16a64eeba7f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732303381587 2024-11-22T19:23:03,483 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting dba3c26b754e40f6b6e095903dd90492, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732303382746 2024-11-22T19:23:03,489 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T19:23:03,489 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:03,489 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:03,489 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:03,489 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:03,489 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:03,489 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:03,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): 
Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:03,505 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/98ca44d60a944bc7be1b87b5c195aa2b is 50, key is test_row_0/A:col10/1732303383487/Put/seqid=0 2024-11-22T19:23:03,514 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#A#compaction#292 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:03,515 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/7ae264b4011e4ba0af51df78cfb57b2a is 50, key is test_row_0/A:col10/1732303382858/Put/seqid=0 2024-11-22T19:23:03,523 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#B#compaction#293 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:03,524 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/30c19665cbf644f985c0271bf6a15db2 is 50, key is test_row_0/B:col10/1732303382858/Put/seqid=0 2024-11-22T19:23:03,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:03,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303443529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:03,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:03,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303443533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:03,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:03,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303443534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:03,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-22T19:23:03,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742170_1346 (size=12201) 2024-11-22T19:23:03,616 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:03,616 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-22T19:23:03,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:03,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:03,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:03,617 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:03,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:03,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742171_1347 (size=12731) 2024-11-22T19:23:03,621 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/98ca44d60a944bc7be1b87b5c195aa2b 2024-11-22T19:23:03,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:03,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742172_1348 (size=12731) 2024-11-22T19:23:03,633 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/30c19665cbf644f985c0271bf6a15db2 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/30c19665cbf644f985c0271bf6a15db2 2024-11-22T19:23:03,634 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/e4753735f3c44ebeb6982672f524c6ee is 50, key is test_row_0/B:col10/1732303383487/Put/seqid=0 2024-11-22T19:23:03,638 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/B of 583533db2ec7fa9b81dbb4dd334629b0 into 30c19665cbf644f985c0271bf6a15db2(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
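The PressureAwareThroughputController entries above report compaction running at about 3.28 MB/second against a total limit of 50.00 MB/second. As a point of reference only, the sketch below shows how that throttle is commonly tuned through the HBase configuration; the property names and values are assumptions recalled from the compaction throughput controller documentation, not taken from this log, and should be checked against the HBase version actually in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputTuning {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed property names for the pressure-aware compaction throttle.
        // A 50 MB/s lower bound matches the "total limit is 50.00 MB/second"
        // reported by the controller in the log above.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        // Ceiling the controller may ramp up to when memstore pressure rises.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        System.out.println("lower bound = "
            + conf.getLong("hbase.hstore.compaction.throughput.lower.bound", -1));
    }
}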
2024-11-22T19:23:03,638 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:03,638 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/B, priority=12, startTime=1732303383474; duration=0sec 2024-11-22T19:23:03,638 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:03,638 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:B 2024-11-22T19:23:03,638 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:23:03,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:03,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303443636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:03,642 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:23:03,642 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/C is initiating minor compaction (all files) 2024-11-22T19:23:03,642 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/C in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:03,642 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/dc921f7c342443b9a9cead3b37daa28d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/dbfef666370b4b4482993796c3fc89f0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/183e5667786a467eaaca07168aefa301, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/9e86f062cab449d699bdc47add859f3c] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=47.9 K 2024-11-22T19:23:03,643 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting dc921f7c342443b9a9cead3b37daa28d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1732303380768 2024-11-22T19:23:03,643 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting dbfef666370b4b4482993796c3fc89f0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732303381467 2024-11-22T19:23:03,643 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 183e5667786a467eaaca07168aefa301, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732303381587 2024-11-22T19:23:03,644 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 
{}] compactions.Compactor(224): Compacting 9e86f062cab449d699bdc47add859f3c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732303382746 2024-11-22T19:23:03,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:03,646 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:03,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303443640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:03,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303443640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:03,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742173_1349 (size=12201) 2024-11-22T19:23:03,670 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/e4753735f3c44ebeb6982672f524c6ee 2024-11-22T19:23:03,679 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#C#compaction#295 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:03,679 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/05e9f80013eb43ff81ec441314806109 is 50, key is test_row_0/C:col10/1732303382858/Put/seqid=0 2024-11-22T19:23:03,689 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/616ed1b67f414d49a3fad89974a57474 is 50, key is test_row_0/C:col10/1732303383487/Put/seqid=0 2024-11-22T19:23:03,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742174_1350 (size=12731) 2024-11-22T19:23:03,709 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/05e9f80013eb43ff81ec441314806109 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/05e9f80013eb43ff81ec441314806109 2024-11-22T19:23:03,717 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/C of 583533db2ec7fa9b81dbb4dd334629b0 into 05e9f80013eb43ff81ec441314806109(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:23:03,717 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:03,717 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/C, priority=12, startTime=1732303383474; duration=0sec 2024-11-22T19:23:03,718 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:03,718 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:C 2024-11-22T19:23:03,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742175_1351 (size=12201) 2024-11-22T19:23:03,773 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:03,773 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-22T19:23:03,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
2024-11-22T19:23:03,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:03,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:03,774 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:03,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
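The recurring RegionTooBusyException warnings ("Over memstore limit=512.0 K") mean writes are being rejected while the region's memstore sits above its blocking threshold, which HBase derives from hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier; with the default multiplier of 4, a 512 K blocking limit would correspond to a deliberately small 128 K flush size, which is plausible for this test but not stated in the log. Below is a minimal client-side sketch, assuming the standard HBase client API, of retrying a put that hits this condition; the table, row, family and qualifier mirror the log, while the retry policy and value are purely illustrative (in practice the client's own retry layer may surface the condition wrapped in a retries-exhausted exception instead).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetry {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
            // Back off and retry while the region reports it is over its
            // memstore blocking limit; give up after a handful of attempts.
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break;
                } catch (RegionTooBusyException e) {
                    if (attempt >= 5) {
                        throw e;
                    }
                    Thread.sleep(100L * attempt); // simple linear backoff, illustrative only
                }
            }
        }
    }
}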
2024-11-22T19:23:03,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:03,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:03,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303443843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:03,851 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:03,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303443848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:03,851 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:03,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303443849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:03,926 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:03,927 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-22T19:23:03,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:03,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:03,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:03,927 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:03,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:03,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:04,023 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/7ae264b4011e4ba0af51df78cfb57b2a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/7ae264b4011e4ba0af51df78cfb57b2a 2024-11-22T19:23:04,029 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/A of 583533db2ec7fa9b81dbb4dd334629b0 into 7ae264b4011e4ba0af51df78cfb57b2a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:23:04,029 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:04,029 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/A, priority=12, startTime=1732303383472; duration=0sec 2024-11-22T19:23:04,029 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:04,029 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:A 2024-11-22T19:23:04,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-22T19:23:04,079 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:04,080 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-22T19:23:04,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:04,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
as already flushing 2024-11-22T19:23:04,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:04,080 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:04,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:04,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:04,140 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/616ed1b67f414d49a3fad89974a57474 2024-11-22T19:23:04,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/98ca44d60a944bc7be1b87b5c195aa2b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/98ca44d60a944bc7be1b87b5c195aa2b 2024-11-22T19:23:04,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:04,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303444148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:04,151 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/98ca44d60a944bc7be1b87b5c195aa2b, entries=150, sequenceid=258, filesize=11.9 K 2024-11-22T19:23:04,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/e4753735f3c44ebeb6982672f524c6ee as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e4753735f3c44ebeb6982672f524c6ee 2024-11-22T19:23:04,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:04,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303444153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:04,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:04,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303444155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:04,159 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e4753735f3c44ebeb6982672f524c6ee, entries=150, sequenceid=258, filesize=11.9 K 2024-11-22T19:23:04,172 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/616ed1b67f414d49a3fad89974a57474 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/616ed1b67f414d49a3fad89974a57474 2024-11-22T19:23:04,176 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/616ed1b67f414d49a3fad89974a57474, entries=150, sequenceid=258, filesize=11.9 K 2024-11-22T19:23:04,178 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 583533db2ec7fa9b81dbb4dd334629b0 in 689ms, sequenceid=258, compaction requested=false 2024-11-22T19:23:04,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:04,232 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:04,232 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-22T19:23:04,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
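The pid=82 entries show the master repeatedly dispatching a FlushRegionCallable that fails with "Unable to complete flush ... as already flushing": the region server is still working through the earlier memstore flush, so each remote attempt is reported back as failed and dispatched again until the region can take a new flush, which happens in the entries that follow. For orientation, here is a hedged sketch of how such a flush is requested from the client side, assuming the standard Admin API; only the table name comes from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestTableFlush {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to run a flush procedure over the table's regions.
            // If a region is already mid-flush, the region-server side fails with
            // IOException("Unable to complete flush ...") and the master re-dispatches
            // the callable -- the repeated pid=82 attempts seen in this log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}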
2024-11-22T19:23:04,233 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T19:23:04,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:04,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:04,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:04,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:04,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:04,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:04,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/639ddd5acbc84a12883cc0f2c58454f8 is 50, key is test_row_0/A:col10/1732303383533/Put/seqid=0 2024-11-22T19:23:04,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742176_1352 (size=12301) 2024-11-22T19:23:04,276 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/639ddd5acbc84a12883cc0f2c58454f8 2024-11-22T19:23:04,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/89adbbd3b8fe44ce982810007bb1bc70 is 50, key is test_row_0/B:col10/1732303383533/Put/seqid=0 2024-11-22T19:23:04,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742177_1353 (size=12301) 2024-11-22T19:23:04,330 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=286 (bloomFilter=true), 
to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/89adbbd3b8fe44ce982810007bb1bc70 2024-11-22T19:23:04,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/c7059c3bf91b4ff490a27404b5b7b387 is 50, key is test_row_0/C:col10/1732303383533/Put/seqid=0 2024-11-22T19:23:04,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742178_1354 (size=12301) 2024-11-22T19:23:04,363 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/c7059c3bf91b4ff490a27404b5b7b387 2024-11-22T19:23:04,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/639ddd5acbc84a12883cc0f2c58454f8 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/639ddd5acbc84a12883cc0f2c58454f8 2024-11-22T19:23:04,376 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/639ddd5acbc84a12883cc0f2c58454f8, entries=150, sequenceid=286, filesize=12.0 K 2024-11-22T19:23:04,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/89adbbd3b8fe44ce982810007bb1bc70 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/89adbbd3b8fe44ce982810007bb1bc70 2024-11-22T19:23:04,389 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/89adbbd3b8fe44ce982810007bb1bc70, entries=150, sequenceid=286, filesize=12.0 K 2024-11-22T19:23:04,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/c7059c3bf91b4ff490a27404b5b7b387 as 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/c7059c3bf91b4ff490a27404b5b7b387 2024-11-22T19:23:04,396 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/c7059c3bf91b4ff490a27404b5b7b387, entries=150, sequenceid=286, filesize=12.0 K 2024-11-22T19:23:04,397 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for 583533db2ec7fa9b81dbb4dd334629b0 in 164ms, sequenceid=286, compaction requested=true 2024-11-22T19:23:04,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:04,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:04,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-22T19:23:04,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-22T19:23:04,400 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-22T19:23:04,400 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4270 sec 2024-11-22T19:23:04,403 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.4320 sec 2024-11-22T19:23:04,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:04,668 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T19:23:04,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:04,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:04,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:04,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:04,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:04,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:04,675 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/a2b417f8f2fa452b899a225a5c54449d is 50, key is test_row_0/A:col10/1732303384667/Put/seqid=0 2024-11-22T19:23:04,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742179_1355 (size=14741) 2024-11-22T19:23:04,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:04,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303444761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:04,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:04,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:04,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303444761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:04,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303444761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:04,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:04,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303444867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:04,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:04,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303444868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:04,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:04,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303444868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:05,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:05,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:05,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303445071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:05,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303445072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:05,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:05,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303445072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:05,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-22T19:23:05,076 INFO [Thread-1335 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-22T19:23:05,078 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:23:05,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-11-22T19:23:05,080 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:23:05,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-22T19:23:05,080 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:23:05,080 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:23:05,090 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/a2b417f8f2fa452b899a225a5c54449d 2024-11-22T19:23:05,109 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/2cdaad9a6f244a76b3e5a0cbd293e181 is 50, key is test_row_0/B:col10/1732303384667/Put/seqid=0 2024-11-22T19:23:05,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742180_1356 (size=12301) 
2024-11-22T19:23:05,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-22T19:23:05,233 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:05,234 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-22T19:23:05,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:05,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:05,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:05,234 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:05,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:05,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:05,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:05,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303445376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:05,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:05,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303445377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:05,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:05,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303445377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:05,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-22T19:23:05,386 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:05,386 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-22T19:23:05,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:05,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:05,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:05,387 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:05,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:05,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:05,539 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:05,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-22T19:23:05,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:05,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:05,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:05,540 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:05,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:05,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:23:05,555 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/2cdaad9a6f244a76b3e5a0cbd293e181 2024-11-22T19:23:05,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/0b368cdcc941487da55aa4cbef6aaa0e is 50, key is test_row_0/C:col10/1732303384667/Put/seqid=0 2024-11-22T19:23:05,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742181_1357 (size=12301) 2024-11-22T19:23:05,598 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/0b368cdcc941487da55aa4cbef6aaa0e 2024-11-22T19:23:05,603 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/a2b417f8f2fa452b899a225a5c54449d as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/a2b417f8f2fa452b899a225a5c54449d 2024-11-22T19:23:05,610 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/a2b417f8f2fa452b899a225a5c54449d, entries=200, sequenceid=298, filesize=14.4 K 2024-11-22T19:23:05,611 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/2cdaad9a6f244a76b3e5a0cbd293e181 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/2cdaad9a6f244a76b3e5a0cbd293e181 2024-11-22T19:23:05,618 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/2cdaad9a6f244a76b3e5a0cbd293e181, entries=150, sequenceid=298, filesize=12.0 K 2024-11-22T19:23:05,619 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/0b368cdcc941487da55aa4cbef6aaa0e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/0b368cdcc941487da55aa4cbef6aaa0e 2024-11-22T19:23:05,624 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/0b368cdcc941487da55aa4cbef6aaa0e, entries=150, sequenceid=298, filesize=12.0 K 2024-11-22T19:23:05,625 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 583533db2ec7fa9b81dbb4dd334629b0 in 957ms, sequenceid=298, compaction requested=true 2024-11-22T19:23:05,625 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:05,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:23:05,625 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:23:05,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:05,626 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51974 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:23:05,626 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/A is initiating minor compaction (all files) 2024-11-22T19:23:05,626 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/A in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
2024-11-22T19:23:05,627 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/7ae264b4011e4ba0af51df78cfb57b2a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/98ca44d60a944bc7be1b87b5c195aa2b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/639ddd5acbc84a12883cc0f2c58454f8, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/a2b417f8f2fa452b899a225a5c54449d] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=50.8 K 2024-11-22T19:23:05,627 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:23:05,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:23:05,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:05,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:23:05,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:05,628 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7ae264b4011e4ba0af51df78cfb57b2a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732303382746 2024-11-22T19:23:05,628 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98ca44d60a944bc7be1b87b5c195aa2b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1732303382870 2024-11-22T19:23:05,628 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49534 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:23:05,628 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/B is initiating minor compaction (all files) 2024-11-22T19:23:05,628 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 639ddd5acbc84a12883cc0f2c58454f8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1732303383527 2024-11-22T19:23:05,628 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/B in 
TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:05,629 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/30c19665cbf644f985c0271bf6a15db2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e4753735f3c44ebeb6982672f524c6ee, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/89adbbd3b8fe44ce982810007bb1bc70, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/2cdaad9a6f244a76b3e5a0cbd293e181] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=48.4 K 2024-11-22T19:23:05,629 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting a2b417f8f2fa452b899a225a5c54449d, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1732303384659 2024-11-22T19:23:05,629 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 30c19665cbf644f985c0271bf6a15db2, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732303382746 2024-11-22T19:23:05,629 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting e4753735f3c44ebeb6982672f524c6ee, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1732303382870 2024-11-22T19:23:05,630 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 89adbbd3b8fe44ce982810007bb1bc70, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1732303383527 2024-11-22T19:23:05,630 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2cdaad9a6f244a76b3e5a0cbd293e181, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1732303384659 2024-11-22T19:23:05,651 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#A#compaction#303 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:05,652 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/aa5b2408b4d44f3ea6e053ea590ca109 is 50, key is test_row_0/A:col10/1732303384667/Put/seqid=0 2024-11-22T19:23:05,664 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#B#compaction#304 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:05,665 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/537579f02ee245ac8243e5b5898def67 is 50, key is test_row_0/B:col10/1732303384667/Put/seqid=0 2024-11-22T19:23:05,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-22T19:23:05,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742182_1358 (size=13017) 2024-11-22T19:23:05,693 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:05,694 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-22T19:23:05,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:05,694 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T19:23:05,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:05,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:05,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:05,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:05,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:05,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:05,697 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/aa5b2408b4d44f3ea6e053ea590ca109 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/aa5b2408b4d44f3ea6e053ea590ca109 2024-11-22T19:23:05,704 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): 
Completed compaction of 4 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/A of 583533db2ec7fa9b81dbb4dd334629b0 into aa5b2408b4d44f3ea6e053ea590ca109(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:23:05,704 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:05,704 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/A, priority=12, startTime=1732303385625; duration=0sec 2024-11-22T19:23:05,704 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:05,704 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:A 2024-11-22T19:23:05,704 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:23:05,707 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49534 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:23:05,707 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/C is initiating minor compaction (all files) 2024-11-22T19:23:05,708 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/C in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
2024-11-22T19:23:05,709 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/05e9f80013eb43ff81ec441314806109, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/616ed1b67f414d49a3fad89974a57474, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/c7059c3bf91b4ff490a27404b5b7b387, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/0b368cdcc941487da55aa4cbef6aaa0e] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=48.4 K 2024-11-22T19:23:05,709 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 05e9f80013eb43ff81ec441314806109, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732303382746 2024-11-22T19:23:05,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742183_1359 (size=13017) 2024-11-22T19:23:05,710 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 616ed1b67f414d49a3fad89974a57474, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1732303382870 2024-11-22T19:23:05,711 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting c7059c3bf91b4ff490a27404b5b7b387, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1732303383527 2024-11-22T19:23:05,713 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b368cdcc941487da55aa4cbef6aaa0e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1732303384659 2024-11-22T19:23:05,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/e2c645302e5f42bb92db86f7175992cc is 50, key is test_row_0/A:col10/1732303384745/Put/seqid=0 2024-11-22T19:23:05,738 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#C#compaction#306 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:05,739 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/2d4d8dd6fac24efeae1a47b3bd0a4c0c is 50, key is test_row_0/C:col10/1732303384667/Put/seqid=0 2024-11-22T19:23:05,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742184_1360 (size=12301) 2024-11-22T19:23:05,767 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/e2c645302e5f42bb92db86f7175992cc 2024-11-22T19:23:05,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742185_1361 (size=13017) 2024-11-22T19:23:05,800 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/2d4d8dd6fac24efeae1a47b3bd0a4c0c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/2d4d8dd6fac24efeae1a47b3bd0a4c0c 2024-11-22T19:23:05,808 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/C of 583533db2ec7fa9b81dbb4dd334629b0 into 2d4d8dd6fac24efeae1a47b3bd0a4c0c(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:23:05,808 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:05,808 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/C, priority=12, startTime=1732303385627; duration=0sec 2024-11-22T19:23:05,808 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:05,808 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:C 2024-11-22T19:23:05,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/cc346babf17d4699b4462bdfe9dafc6c is 50, key is test_row_0/B:col10/1732303384745/Put/seqid=0 2024-11-22T19:23:05,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742186_1362 (size=12301) 2024-11-22T19:23:05,848 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/cc346babf17d4699b4462bdfe9dafc6c 2024-11-22T19:23:05,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/a2434eee018544328db0dc86ef407bc2 is 50, key is test_row_0/C:col10/1732303384745/Put/seqid=0 2024-11-22T19:23:05,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:05,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:05,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742187_1363 (size=12301) 2024-11-22T19:23:05,901 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:05,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303445897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:05,901 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:05,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303445898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:05,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:05,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303445901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:06,004 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:06,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303446002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:06,004 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:06,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303446003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:06,008 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:06,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303446005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:06,118 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/537579f02ee245ac8243e5b5898def67 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/537579f02ee245ac8243e5b5898def67 2024-11-22T19:23:06,126 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/B of 583533db2ec7fa9b81dbb4dd334629b0 into 537579f02ee245ac8243e5b5898def67(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:23:06,126 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:06,126 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/B, priority=12, startTime=1732303385627; duration=0sec 2024-11-22T19:23:06,126 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:06,126 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:B 2024-11-22T19:23:06,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-22T19:23:06,207 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:06,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303446205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:06,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:06,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303446206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:06,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:06,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303446210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:06,298 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/a2434eee018544328db0dc86ef407bc2 2024-11-22T19:23:06,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/e2c645302e5f42bb92db86f7175992cc as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/e2c645302e5f42bb92db86f7175992cc 2024-11-22T19:23:06,308 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/e2c645302e5f42bb92db86f7175992cc, entries=150, sequenceid=323, filesize=12.0 K 2024-11-22T19:23:06,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/cc346babf17d4699b4462bdfe9dafc6c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/cc346babf17d4699b4462bdfe9dafc6c 2024-11-22T19:23:06,315 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/cc346babf17d4699b4462bdfe9dafc6c, entries=150, sequenceid=323, filesize=12.0 K 2024-11-22T19:23:06,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/a2434eee018544328db0dc86ef407bc2 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/a2434eee018544328db0dc86ef407bc2 2024-11-22T19:23:06,321 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/a2434eee018544328db0dc86ef407bc2, entries=150, sequenceid=323, filesize=12.0 K 2024-11-22T19:23:06,322 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 583533db2ec7fa9b81dbb4dd334629b0 in 628ms, sequenceid=323, compaction requested=false 2024-11-22T19:23:06,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:06,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:06,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-22T19:23:06,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-22T19:23:06,325 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-22T19:23:06,325 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2440 sec 2024-11-22T19:23:06,327 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 1.2480 sec 2024-11-22T19:23:06,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:06,514 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-22T19:23:06,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:06,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:06,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:06,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:06,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:06,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:06,525 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/04b0ee589b6e4a77860967b8bd44bbfd is 50, key is test_row_0/A:col10/1732303386513/Put/seqid=0 2024-11-22T19:23:06,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742188_1364 (size=12301) 2024-11-22T19:23:06,540 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=339 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/04b0ee589b6e4a77860967b8bd44bbfd 2024-11-22T19:23:06,550 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/998fdc28d3e945228bad75b6e8bf60b3 is 50, key is test_row_0/B:col10/1732303386513/Put/seqid=0 2024-11-22T19:23:06,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:06,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303446565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:06,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:06,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303446566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:06,573 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:06,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303446572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:06,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742189_1365 (size=12301) 2024-11-22T19:23:06,588 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=339 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/998fdc28d3e945228bad75b6e8bf60b3 2024-11-22T19:23:06,601 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/1d4d598cc47d4bcebfc3939d74c81a1b is 50, key is test_row_0/C:col10/1732303386513/Put/seqid=0 2024-11-22T19:23:06,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742190_1366 (size=12301) 2024-11-22T19:23:06,674 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:06,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303446672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:06,674 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:06,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303446673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:06,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:06,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303446674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:06,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:06,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303446876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:06,878 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:06,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303446877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:06,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:06,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303446877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:07,028 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=339 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/1d4d598cc47d4bcebfc3939d74c81a1b 2024-11-22T19:23:07,033 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/04b0ee589b6e4a77860967b8bd44bbfd as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/04b0ee589b6e4a77860967b8bd44bbfd 2024-11-22T19:23:07,037 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/04b0ee589b6e4a77860967b8bd44bbfd, entries=150, sequenceid=339, filesize=12.0 K 2024-11-22T19:23:07,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/998fdc28d3e945228bad75b6e8bf60b3 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/998fdc28d3e945228bad75b6e8bf60b3 2024-11-22T19:23:07,043 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/998fdc28d3e945228bad75b6e8bf60b3, entries=150, sequenceid=339, filesize=12.0 K 2024-11-22T19:23:07,044 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/1d4d598cc47d4bcebfc3939d74c81a1b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/1d4d598cc47d4bcebfc3939d74c81a1b 2024-11-22T19:23:07,048 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/1d4d598cc47d4bcebfc3939d74c81a1b, entries=150, sequenceid=339, filesize=12.0 K 2024-11-22T19:23:07,049 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 583533db2ec7fa9b81dbb4dd334629b0 in 535ms, sequenceid=339, compaction requested=true 2024-11-22T19:23:07,049 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:07,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:23:07,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:07,049 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:07,049 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:07,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:23:07,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:07,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:23:07,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:07,050 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:07,050 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/A is initiating minor compaction (all files) 2024-11-22T19:23:07,051 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/A in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
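The burst of RegionTooBusyException warnings above is the region server's memstore back-pressure, not a test failure: once the region's in-memory data exceeds the blocking limit (512.0 K here, far below production defaults and presumably lowered by the test configuration), HRegion.checkResources rejects new mutations until MemStoreFlusher.0 finishes writing the .tmp files out. Clients normally ride this out through their built-in retry policy. A minimal client-side sketch of a write against this table (Java; the property values, row, and value are illustrative, not the test's own settings):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative values: give the client more room to retry while the
        // region is flushing and rejecting writes with RegionTooBusyException.
        conf.setInt("hbase.client.retries.number", 15);
        conf.setLong("hbase.client.pause", 100L);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
          try {
            table.put(put); // RegionTooBusyException is retried internally by the client
          } catch (IOException e) {
            // Surfaces here (possibly wrapped) only after client retries are
            // exhausted; back off and retry at the application level.
          }
        }
      }
    }

The writer threads in the test behave the same way, which is why the same connections (55804, 55844, 55878) reappear with new callIds moments after each rejection.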
2024-11-22T19:23:07,051 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/aa5b2408b4d44f3ea6e053ea590ca109, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/e2c645302e5f42bb92db86f7175992cc, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/04b0ee589b6e4a77860967b8bd44bbfd] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=36.7 K 2024-11-22T19:23:07,051 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:07,051 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/B is initiating minor compaction (all files) 2024-11-22T19:23:07,051 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/B in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:07,051 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/537579f02ee245ac8243e5b5898def67, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/cc346babf17d4699b4462bdfe9dafc6c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/998fdc28d3e945228bad75b6e8bf60b3] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=36.7 K 2024-11-22T19:23:07,051 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa5b2408b4d44f3ea6e053ea590ca109, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1732303384659 2024-11-22T19:23:07,052 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 537579f02ee245ac8243e5b5898def67, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1732303384659 2024-11-22T19:23:07,052 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e2c645302e5f42bb92db86f7175992cc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1732303384745 2024-11-22T19:23:07,052 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting cc346babf17d4699b4462bdfe9dafc6c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1732303384745 2024-11-22T19:23:07,053 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 04b0ee589b6e4a77860967b8bd44bbfd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=339, earliestPutTs=1732303385895 2024-11-22T19:23:07,053 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 998fdc28d3e945228bad75b6e8bf60b3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=339, earliestPutTs=1732303385895 2024-11-22T19:23:07,080 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#A#compaction#312 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:07,081 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/e157f187fc8842d4bf4c693c55c4a839 is 50, key is test_row_0/A:col10/1732303386513/Put/seqid=0 2024-11-22T19:23:07,086 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#B#compaction#313 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:07,087 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/667c841fa77c45e89adfa45694231198 is 50, key is test_row_0/B:col10/1732303386513/Put/seqid=0 2024-11-22T19:23:07,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742191_1367 (size=13119) 2024-11-22T19:23:07,138 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/e157f187fc8842d4bf4c693c55c4a839 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/e157f187fc8842d4bf4c693c55c4a839 2024-11-22T19:23:07,145 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/A of 583533db2ec7fa9b81dbb4dd334629b0 into e157f187fc8842d4bf4c693c55c4a839(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:23:07,145 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:07,145 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/A, priority=13, startTime=1732303387049; duration=0sec 2024-11-22T19:23:07,146 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:07,146 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:A 2024-11-22T19:23:07,146 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:07,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742192_1368 (size=13119) 2024-11-22T19:23:07,154 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:07,154 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/C is initiating minor compaction (all files) 2024-11-22T19:23:07,154 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/C in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
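The "Exploring compaction algorithm has selected 3 files of size 37619" lines show minor-compaction selection at work: after the flush each of A, B and C holds three store files (~12.7 K + 12.0 K + 12.0 K), which reaches the minimum candidate count and passes the size-ratio test, so every family is rewritten into a single ~12.8 K file while the "under compaction" marks keep the stores from being re-queued. A simplified, self-contained sketch of that ratio test (illustration only, not the actual ExploringCompactionPolicy code path; 1.2 is the documented default ratio):

    import java.util.List;

    public class RatioCheckSketch {
      // Simplified size-ratio test for a minor-compaction candidate set:
      // every file must be no larger than ratio * (sum of the other files).
      static boolean withinRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
          if (size > ratio * (total - size)) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // The three flushed files from the log sum to 37619 bytes.
        System.out.println(withinRatio(List.of(13017L, 12301L, 12301L), 1.2)); // true
      }
    }

The knobs behind these lines are hbase.hstore.compaction.min/.max (candidate count), hbase.hstore.compaction.ratio, and hbase.hstore.blockingStoreFiles, which is the "16 blocking" reported by SortedCompactionPolicy.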
2024-11-22T19:23:07,154 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/2d4d8dd6fac24efeae1a47b3bd0a4c0c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/a2434eee018544328db0dc86ef407bc2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/1d4d598cc47d4bcebfc3939d74c81a1b] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=36.7 K 2024-11-22T19:23:07,155 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d4d8dd6fac24efeae1a47b3bd0a4c0c, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1732303384659 2024-11-22T19:23:07,155 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting a2434eee018544328db0dc86ef407bc2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1732303384745 2024-11-22T19:23:07,156 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d4d598cc47d4bcebfc3939d74c81a1b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=339, earliestPutTs=1732303385895 2024-11-22T19:23:07,183 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-22T19:23:07,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:07,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:07,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:07,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:07,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:07,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:07,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:07,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-22T19:23:07,185 INFO [Thread-1335 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-22T19:23:07,187 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:23:07,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, 
state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-11-22T19:23:07,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-22T19:23:07,188 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:23:07,189 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:23:07,189 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:23:07,194 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#C#compaction#314 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:07,195 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/c336223973fc4c50bfe6926c259d8e8b is 50, key is test_row_0/C:col10/1732303386513/Put/seqid=0 2024-11-22T19:23:07,206 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/93453b641b0d4db295e62a0ca57c8d69 is 50, key is test_row_0/A:col10/1732303386554/Put/seqid=0 2024-11-22T19:23:07,207 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:07,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303447202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:07,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:07,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303447208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:07,212 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:07,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303447209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:07,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742193_1369 (size=13119) 2024-11-22T19:23:07,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742194_1370 (size=12301) 2024-11-22T19:23:07,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-22T19:23:07,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:07,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303447309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:07,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:07,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303447313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:07,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:07,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303447313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:07,341 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:07,342 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-22T19:23:07,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:07,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:07,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:07,342 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:07,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:07,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:07,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-22T19:23:07,496 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:07,496 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-22T19:23:07,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:07,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:07,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:07,497 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:23:07,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:07,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:07,514 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:07,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303447512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:07,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:07,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303447517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:07,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:07,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303447518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:07,560 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/667c841fa77c45e89adfa45694231198 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/667c841fa77c45e89adfa45694231198 2024-11-22T19:23:07,566 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/B of 583533db2ec7fa9b81dbb4dd334629b0 into 667c841fa77c45e89adfa45694231198(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
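The repeated "java.io.IOException: Unable to complete flush" errors for pid=86 are expected contention rather than corruption: FlushRegionCallable declines to start a second flush while MemStoreFlusher.0 is still flushing the region ("NOT flushing ... as already flushing"), the region server reports the failure, and the master re-dispatches the FlushRegionProcedure until it can run. The client side of this loop is just the table flush issued by the test through the Admin API (a minimal sketch, assuming default connection settings):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; in this log the
          // admin client waits on the resulting FlushTableProcedure (pid=85/86).
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

Procedure 83 finished the same way a few lines earlier ("Operation: FLUSH ... procId: 83 completed"), so pid=85/86 is expected to converge once the in-flight flush completes.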
2024-11-22T19:23:07,566 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0:
2024-11-22T19:23:07,566 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/B, priority=13, startTime=1732303387049; duration=0sec
2024-11-22T19:23:07,566 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-22T19:23:07,566 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:B
2024-11-22T19:23:07,648 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/c336223973fc4c50bfe6926c259d8e8b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/c336223973fc4c50bfe6926c259d8e8b
2024-11-22T19:23:07,649 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657
2024-11-22T19:23:07,650 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86
2024-11-22T19:23:07,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.
2024-11-22T19:23:07,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing
2024-11-22T19:23:07,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.
2024-11-22T19:23:07,650 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86
java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T19:23:07,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86
java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T19:23:07,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=86
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T19:23:07,655 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/C of 583533db2ec7fa9b81dbb4dd334629b0 into c336223973fc4c50bfe6926c259d8e8b(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-22T19:23:07,655 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0:
2024-11-22T19:23:07,655 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/C, priority=13, startTime=1732303387049; duration=0sec
2024-11-22T19:23:07,655 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-22T19:23:07,655 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:C
2024-11-22T19:23:07,676 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/93453b641b0d4db295e62a0ca57c8d69
2024-11-22T19:23:07,690 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/247783e2af4e4a5ca25cd8ae5b93d668 is 50, key is test_row_0/B:col10/1732303386554/Put/seqid=0
2024-11-22T19:23:07,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742195_1371 (size=12301)
2024-11-22T19:23:07,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85
2024-11-22T19:23:07,803 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657
2024-11-22T19:23:07,803 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86
2024-11-22T19:23:07,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.
2024-11-22T19:23:07,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing
2024-11-22T19:23:07,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.
2024-11-22T19:23:07,804 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86
java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T19:23:07,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86
java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T19:23:07,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=86
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T19:23:07,819 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T19:23:07,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303447817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657
2024-11-22T19:23:07,821 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T19:23:07,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303447820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657
2024-11-22T19:23:07,822 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T19:23:07,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303447821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657
2024-11-22T19:23:07,955 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657
2024-11-22T19:23:07,956 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86
2024-11-22T19:23:07,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.
2024-11-22T19:23:07,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing
2024-11-22T19:23:07,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.
2024-11-22T19:23:07,956 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86
java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T19:23:07,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86
java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T19:23:07,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=86
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T19:23:08,109 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657
2024-11-22T19:23:08,109 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86
2024-11-22T19:23:08,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.
2024-11-22T19:23:08,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing
2024-11-22T19:23:08,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.
2024-11-22T19:23:08,110 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86
java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T19:23:08,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86
java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T19:23:08,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=86
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T19:23:08,135 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/247783e2af4e4a5ca25cd8ae5b93d668
2024-11-22T19:23:08,147 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/933c0cc94fc44c70ab8271d2b69dabcd is 50, key is test_row_0/C:col10/1732303386554/Put/seqid=0
2024-11-22T19:23:08,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742196_1372 (size=12301)
2024-11-22T19:23:08,170 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/933c0cc94fc44c70ab8271d2b69dabcd
2024-11-22T19:23:08,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/93453b641b0d4db295e62a0ca57c8d69 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/93453b641b0d4db295e62a0ca57c8d69
2024-11-22T19:23:08,181 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/93453b641b0d4db295e62a0ca57c8d69, entries=150, sequenceid=364, filesize=12.0 K
2024-11-22T19:23:08,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/247783e2af4e4a5ca25cd8ae5b93d668 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/247783e2af4e4a5ca25cd8ae5b93d668
2024-11-22T19:23:08,189 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/247783e2af4e4a5ca25cd8ae5b93d668, entries=150, sequenceid=364, filesize=12.0 K
2024-11-22T19:23:08,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/933c0cc94fc44c70ab8271d2b69dabcd as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/933c0cc94fc44c70ab8271d2b69dabcd
2024-11-22T19:23:08,196 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/933c0cc94fc44c70ab8271d2b69dabcd, entries=150, sequenceid=364, filesize=12.0 K
2024-11-22T19:23:08,197 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 583533db2ec7fa9b81dbb4dd334629b0 in 1014ms, sequenceid=364, compaction requested=false
2024-11-22T19:23:08,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0:
2024-11-22T19:23:08,262 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657
2024-11-22T19:23:08,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86
2024-11-22T19:23:08,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.
2024-11-22T19:23:08,263 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB
2024-11-22T19:23:08,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A
2024-11-22T19:23:08,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T19:23:08,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B
2024-11-22T19:23:08,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T19:23:08,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C
2024-11-22T19:23:08,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T19:23:08,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/d1e2f6a326c74f5b99874dcf189920a3 is 50, key is test_row_0/A:col10/1732303387200/Put/seqid=0
2024-11-22T19:23:08,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85
2024-11-22T19:23:08,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742197_1373 (size=12301)
2024-11-22T19:23:08,302 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/d1e2f6a326c74f5b99874dcf189920a3
2024-11-22T19:23:08,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/e69abe4adbec43bda6ccec75f52ac16e is 50, key is test_row_0/B:col10/1732303387200/Put/seqid=0
2024-11-22T19:23:08,323 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing
2024-11-22T19:23:08,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0
2024-11-22T19:23:08,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742198_1374 (size=12301)
2024-11-22T19:23:08,343 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/e69abe4adbec43bda6ccec75f52ac16e
2024-11-22T19:23:08,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/012460f641fc407c94d14033c086e359 is 50, key is test_row_0/C:col10/1732303387200/Put/seqid=0
2024-11-22T19:23:08,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T19:23:08,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303448362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657
2024-11-22T19:23:08,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T19:23:08,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303448365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657
2024-11-22T19:23:08,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T19:23:08,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303448365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657
2024-11-22T19:23:08,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742199_1375 (size=12301)
2024-11-22T19:23:08,412 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/012460f641fc407c94d14033c086e359
2024-11-22T19:23:08,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/d1e2f6a326c74f5b99874dcf189920a3 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/d1e2f6a326c74f5b99874dcf189920a3
2024-11-22T19:23:08,424 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/d1e2f6a326c74f5b99874dcf189920a3, entries=150, sequenceid=379, filesize=12.0 K
2024-11-22T19:23:08,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/e69abe4adbec43bda6ccec75f52ac16e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e69abe4adbec43bda6ccec75f52ac16e
2024-11-22T19:23:08,432 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e69abe4adbec43bda6ccec75f52ac16e, entries=150, sequenceid=379, filesize=12.0 K
2024-11-22T19:23:08,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/012460f641fc407c94d14033c086e359 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/012460f641fc407c94d14033c086e359
2024-11-22T19:23:08,450 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/012460f641fc407c94d14033c086e359, entries=150, sequenceid=379, filesize=12.0 K
2024-11-22T19:23:08,452 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 583533db2ec7fa9b81dbb4dd334629b0 in 189ms, sequenceid=379, compaction requested=true
2024-11-22T19:23:08,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0:
2024-11-22T19:23:08,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.
2024-11-22T19:23:08,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86
2024-11-22T19:23:08,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=86
2024-11-22T19:23:08,456 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85
2024-11-22T19:23:08,456 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2650 sec
2024-11-22T19:23:08,458 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 1.2690 sec
2024-11-22T19:23:08,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0
2024-11-22T19:23:08,471 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB
2024-11-22T19:23:08,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A
2024-11-22T19:23:08,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T19:23:08,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B
2024-11-22T19:23:08,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T19:23:08,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C
2024-11-22T19:23:08,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T19:23:08,477 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/49ca99f074db49f8b09e5a453bc1f5c8 is 50, key is test_row_0/A:col10/1732303388363/Put/seqid=0
2024-11-22T19:23:08,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T19:23:08,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303448485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657
2024-11-22T19:23:08,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T19:23:08,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303448485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657
2024-11-22T19:23:08,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:08,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303448486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:08,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742200_1376 (size=12301) 2024-11-22T19:23:08,517 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=404 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/49ca99f074db49f8b09e5a453bc1f5c8 2024-11-22T19:23:08,535 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/34c79e3e086f4da4be67d4b35e01ab80 is 50, key is test_row_0/B:col10/1732303388363/Put/seqid=0 2024-11-22T19:23:08,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742201_1377 (size=12301) 2024-11-22T19:23:08,563 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=404 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/34c79e3e086f4da4be67d4b35e01ab80 2024-11-22T19:23:08,591 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/2bf461f9d7af4773b5ad2a468b7811ba is 50, key is test_row_0/C:col10/1732303388363/Put/seqid=0 2024-11-22T19:23:08,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:08,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303448590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:08,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:08,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303448590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:08,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:08,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303448590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:08,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742202_1378 (size=12301) 2024-11-22T19:23:08,631 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=404 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/2bf461f9d7af4773b5ad2a468b7811ba 2024-11-22T19:23:08,635 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/49ca99f074db49f8b09e5a453bc1f5c8 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/49ca99f074db49f8b09e5a453bc1f5c8 2024-11-22T19:23:08,641 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/49ca99f074db49f8b09e5a453bc1f5c8, entries=150, sequenceid=404, filesize=12.0 K 2024-11-22T19:23:08,642 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/34c79e3e086f4da4be67d4b35e01ab80 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/34c79e3e086f4da4be67d4b35e01ab80 2024-11-22T19:23:08,658 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/34c79e3e086f4da4be67d4b35e01ab80, entries=150, sequenceid=404, filesize=12.0 K 2024-11-22T19:23:08,659 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/2bf461f9d7af4773b5ad2a468b7811ba as 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/2bf461f9d7af4773b5ad2a468b7811ba 2024-11-22T19:23:08,665 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/2bf461f9d7af4773b5ad2a468b7811ba, entries=150, sequenceid=404, filesize=12.0 K 2024-11-22T19:23:08,666 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 583533db2ec7fa9b81dbb4dd334629b0 in 195ms, sequenceid=404, compaction requested=true 2024-11-22T19:23:08,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:08,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:23:08,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:08,666 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:23:08,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:23:08,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:08,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:23:08,666 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:23:08,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:08,668 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50022 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:23:08,668 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50022 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:23:08,668 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/A is initiating minor compaction (all files) 2024-11-22T19:23:08,668 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/B is initiating minor compaction (all files) 2024-11-22T19:23:08,668 INFO 
[RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/A in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:08,669 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/e157f187fc8842d4bf4c693c55c4a839, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/93453b641b0d4db295e62a0ca57c8d69, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/d1e2f6a326c74f5b99874dcf189920a3, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/49ca99f074db49f8b09e5a453bc1f5c8] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=48.8 K 2024-11-22T19:23:08,669 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/B in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:08,669 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/667c841fa77c45e89adfa45694231198, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/247783e2af4e4a5ca25cd8ae5b93d668, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e69abe4adbec43bda6ccec75f52ac16e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/34c79e3e086f4da4be67d4b35e01ab80] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=48.8 K 2024-11-22T19:23:08,669 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e157f187fc8842d4bf4c693c55c4a839, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=339, earliestPutTs=1732303385895 2024-11-22T19:23:08,669 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 667c841fa77c45e89adfa45694231198, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=339, earliestPutTs=1732303385895 2024-11-22T19:23:08,670 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93453b641b0d4db295e62a0ca57c8d69, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1732303386554 2024-11-22T19:23:08,670 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 247783e2af4e4a5ca25cd8ae5b93d668, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, 
seqNum=364, earliestPutTs=1732303386554 2024-11-22T19:23:08,671 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1e2f6a326c74f5b99874dcf189920a3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1732303387200 2024-11-22T19:23:08,671 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting e69abe4adbec43bda6ccec75f52ac16e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1732303387200 2024-11-22T19:23:08,672 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49ca99f074db49f8b09e5a453bc1f5c8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=404, earliestPutTs=1732303388356 2024-11-22T19:23:08,672 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 34c79e3e086f4da4be67d4b35e01ab80, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=404, earliestPutTs=1732303388356 2024-11-22T19:23:08,699 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#A#compaction#324 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:08,700 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/f32b34f86dc94f7ab6f769b58de76a50 is 50, key is test_row_0/A:col10/1732303388363/Put/seqid=0 2024-11-22T19:23:08,702 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#B#compaction#325 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:08,702 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/068c842b266d457ca6396ad71ff059ef is 50, key is test_row_0/B:col10/1732303388363/Put/seqid=0 2024-11-22T19:23:08,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742204_1380 (size=13255) 2024-11-22T19:23:08,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742203_1379 (size=13255) 2024-11-22T19:23:08,756 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/068c842b266d457ca6396ad71ff059ef as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/068c842b266d457ca6396ad71ff059ef 2024-11-22T19:23:08,760 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/f32b34f86dc94f7ab6f769b58de76a50 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f32b34f86dc94f7ab6f769b58de76a50 2024-11-22T19:23:08,762 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/B of 583533db2ec7fa9b81dbb4dd334629b0 into 068c842b266d457ca6396ad71ff059ef(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:23:08,762 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:08,763 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/B, priority=12, startTime=1732303388666; duration=0sec 2024-11-22T19:23:08,763 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:08,763 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:B 2024-11-22T19:23:08,763 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:23:08,765 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50022 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:23:08,765 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/C is initiating minor compaction (all files) 2024-11-22T19:23:08,765 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/C in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:08,765 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/c336223973fc4c50bfe6926c259d8e8b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/933c0cc94fc44c70ab8271d2b69dabcd, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/012460f641fc407c94d14033c086e359, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/2bf461f9d7af4773b5ad2a468b7811ba] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=48.8 K 2024-11-22T19:23:08,766 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting c336223973fc4c50bfe6926c259d8e8b, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=339, earliestPutTs=1732303385895 2024-11-22T19:23:08,767 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 933c0cc94fc44c70ab8271d2b69dabcd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1732303386554 2024-11-22T19:23:08,767 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 012460f641fc407c94d14033c086e359, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=379, earliestPutTs=1732303387200 2024-11-22T19:23:08,768 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2bf461f9d7af4773b5ad2a468b7811ba, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=404, earliestPutTs=1732303388356 2024-11-22T19:23:08,770 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/A of 583533db2ec7fa9b81dbb4dd334629b0 into f32b34f86dc94f7ab6f769b58de76a50(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:23:08,770 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:08,770 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/A, priority=12, startTime=1732303388666; duration=0sec 2024-11-22T19:23:08,770 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:08,770 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:A 2024-11-22T19:23:08,795 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#C#compaction#326 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:08,795 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/d7cebb95964743ccb9356c161efb2029 is 50, key is test_row_0/C:col10/1732303388363/Put/seqid=0 2024-11-22T19:23:08,800 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-22T19:23:08,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:08,801 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:08,801 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:08,801 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:08,801 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:08,801 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:08,801 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:08,813 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/fa83f8c2395f4724a2ab9780fd100364 is 50, key is test_row_0/A:col10/1732303388484/Put/seqid=0 2024-11-22T19:23:08,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:08,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303448839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:08,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742205_1381 (size=13255) 2024-11-22T19:23:08,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:08,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303448841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:08,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:08,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303448843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:08,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742206_1382 (size=14741) 2024-11-22T19:23:08,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:08,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303448946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:08,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:08,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303448949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:08,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:08,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303448949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:09,155 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:09,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303449152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:09,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:09,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303449153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:09,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:09,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303449155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:09,251 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/d7cebb95964743ccb9356c161efb2029 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/d7cebb95964743ccb9356c161efb2029 2024-11-22T19:23:09,256 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/C of 583533db2ec7fa9b81dbb4dd334629b0 into d7cebb95964743ccb9356c161efb2029(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:23:09,256 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:09,256 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/C, priority=12, startTime=1732303388666; duration=0sec 2024-11-22T19:23:09,256 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:09,256 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:C 2024-11-22T19:23:09,279 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=419 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/fa83f8c2395f4724a2ab9780fd100364 2024-11-22T19:23:09,296 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/e1b0624f68fd43bc945a060485673e44 is 50, key is test_row_0/B:col10/1732303388484/Put/seqid=0 2024-11-22T19:23:09,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-22T19:23:09,298 INFO [Thread-1335 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-22T19:23:09,299 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:23:09,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-11-22T19:23:09,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-22T19:23:09,301 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:23:09,302 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:23:09,302 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:23:09,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742207_1383 (size=12301) 2024-11-22T19:23:09,336 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=419 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/e1b0624f68fd43bc945a060485673e44 2024-11-22T19:23:09,363 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/7e0b0dd57f404d8b8f4577cebcbcb6e3 is 50, key is test_row_0/C:col10/1732303388484/Put/seqid=0 2024-11-22T19:23:09,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-22T19:23:09,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742208_1384 (size=12301) 2024-11-22T19:23:09,414 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=419 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/7e0b0dd57f404d8b8f4577cebcbcb6e3 2024-11-22T19:23:09,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/fa83f8c2395f4724a2ab9780fd100364 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/fa83f8c2395f4724a2ab9780fd100364 2024-11-22T19:23:09,427 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/fa83f8c2395f4724a2ab9780fd100364, entries=200, sequenceid=419, filesize=14.4 K 2024-11-22T19:23:09,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/e1b0624f68fd43bc945a060485673e44 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e1b0624f68fd43bc945a060485673e44 2024-11-22T19:23:09,434 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e1b0624f68fd43bc945a060485673e44, entries=150, sequenceid=419, filesize=12.0 K 2024-11-22T19:23:09,436 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/7e0b0dd57f404d8b8f4577cebcbcb6e3 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/7e0b0dd57f404d8b8f4577cebcbcb6e3 2024-11-22T19:23:09,441 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/7e0b0dd57f404d8b8f4577cebcbcb6e3, entries=150, sequenceid=419, filesize=12.0 K 2024-11-22T19:23:09,442 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 583533db2ec7fa9b81dbb4dd334629b0 in 643ms, sequenceid=419, compaction requested=false 2024-11-22T19:23:09,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:09,454 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:09,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-22T19:23:09,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:09,455 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-22T19:23:09,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:09,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:09,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:09,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:09,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:09,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:09,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:09,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
as already flushing 2024-11-22T19:23:09,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/f5bf4dfa9ef34001a57159813e6e2bf1 is 50, key is test_row_0/A:col10/1732303388839/Put/seqid=0 2024-11-22T19:23:09,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:09,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303449477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:09,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:09,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303449477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:09,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:09,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303449477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:09,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742209_1385 (size=12301) 2024-11-22T19:23:09,587 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:09,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303449584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:09,587 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:09,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303449584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:09,587 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:09,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303449585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:09,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-22T19:23:09,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:09,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303449789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:09,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:09,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303449790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:09,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:09,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303449790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:09,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-22T19:23:09,904 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=443 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/f5bf4dfa9ef34001a57159813e6e2bf1 2024-11-22T19:23:09,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/bce6fa366b814b1e8e6a7369894fe66a is 50, key is test_row_0/B:col10/1732303388839/Put/seqid=0 2024-11-22T19:23:09,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742210_1386 (size=12301) 2024-11-22T19:23:09,925 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=443 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/bce6fa366b814b1e8e6a7369894fe66a 2024-11-22T19:23:09,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/097bcd9cb10c41f4bf9372d3ee7be4e1 is 50, key is test_row_0/C:col10/1732303388839/Put/seqid=0 2024-11-22T19:23:09,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to 
blk_1073742211_1387 (size=12301) 2024-11-22T19:23:09,968 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=443 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/097bcd9cb10c41f4bf9372d3ee7be4e1 2024-11-22T19:23:09,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/f5bf4dfa9ef34001a57159813e6e2bf1 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f5bf4dfa9ef34001a57159813e6e2bf1 2024-11-22T19:23:09,985 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f5bf4dfa9ef34001a57159813e6e2bf1, entries=150, sequenceid=443, filesize=12.0 K 2024-11-22T19:23:09,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/bce6fa366b814b1e8e6a7369894fe66a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/bce6fa366b814b1e8e6a7369894fe66a 2024-11-22T19:23:09,992 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/bce6fa366b814b1e8e6a7369894fe66a, entries=150, sequenceid=443, filesize=12.0 K 2024-11-22T19:23:09,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/097bcd9cb10c41f4bf9372d3ee7be4e1 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/097bcd9cb10c41f4bf9372d3ee7be4e1 2024-11-22T19:23:09,999 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/097bcd9cb10c41f4bf9372d3ee7be4e1, entries=150, sequenceid=443, filesize=12.0 K 2024-11-22T19:23:10,001 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 
583533db2ec7fa9b81dbb4dd334629b0 in 546ms, sequenceid=443, compaction requested=true 2024-11-22T19:23:10,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:10,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:10,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-11-22T19:23:10,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-11-22T19:23:10,004 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-22T19:23:10,004 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 700 msec 2024-11-22T19:23:10,006 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 705 msec 2024-11-22T19:23:10,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:10,099 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-22T19:23:10,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:10,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:10,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:10,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:10,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:10,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:10,104 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/b487763388514094ab2e9473e0869458 is 50, key is test_row_0/A:col10/1732303390093/Put/seqid=0 2024-11-22T19:23:10,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742212_1388 (size=12301) 2024-11-22T19:23:10,127 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=457 (bloomFilter=true), 
to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/b487763388514094ab2e9473e0869458 2024-11-22T19:23:10,131 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:10,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303450127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:10,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:10,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303450128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:10,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:10,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303450129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:10,136 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/6401aafa6d5d48b8973eaad8fedb85c2 is 50, key is test_row_0/B:col10/1732303390093/Put/seqid=0 2024-11-22T19:23:10,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742213_1389 (size=12301) 2024-11-22T19:23:10,169 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=457 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/6401aafa6d5d48b8973eaad8fedb85c2 2024-11-22T19:23:10,179 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/aeda6ca8144141188ea097ec250a8514 is 50, key is test_row_0/C:col10/1732303390093/Put/seqid=0 2024-11-22T19:23:10,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742214_1390 (size=12301) 2024-11-22T19:23:10,196 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=457 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/aeda6ca8144141188ea097ec250a8514 2024-11-22T19:23:10,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/b487763388514094ab2e9473e0869458 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/b487763388514094ab2e9473e0869458 2024-11-22T19:23:10,206 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/b487763388514094ab2e9473e0869458, entries=150, sequenceid=457, filesize=12.0 K 
2024-11-22T19:23:10,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/6401aafa6d5d48b8973eaad8fedb85c2 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/6401aafa6d5d48b8973eaad8fedb85c2 2024-11-22T19:23:10,219 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/6401aafa6d5d48b8973eaad8fedb85c2, entries=150, sequenceid=457, filesize=12.0 K 2024-11-22T19:23:10,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/aeda6ca8144141188ea097ec250a8514 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/aeda6ca8144141188ea097ec250a8514 2024-11-22T19:23:10,225 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/aeda6ca8144141188ea097ec250a8514, entries=150, sequenceid=457, filesize=12.0 K 2024-11-22T19:23:10,226 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 583533db2ec7fa9b81dbb4dd334629b0 in 127ms, sequenceid=457, compaction requested=true 2024-11-22T19:23:10,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:10,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:23:10,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:10,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:23:10,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:10,226 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:23:10,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:23:10,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T19:23:10,226 DEBUG 
[RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:23:10,234 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50158 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:23:10,234 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/B is initiating minor compaction (all files) 2024-11-22T19:23:10,234 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/B in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:10,235 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/068c842b266d457ca6396ad71ff059ef, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e1b0624f68fd43bc945a060485673e44, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/bce6fa366b814b1e8e6a7369894fe66a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/6401aafa6d5d48b8973eaad8fedb85c2] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=49.0 K 2024-11-22T19:23:10,237 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-22T19:23:10,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:10,238 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52598 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:23:10,238 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 068c842b266d457ca6396ad71ff059ef, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=404, earliestPutTs=1732303388356 2024-11-22T19:23:10,238 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/A is initiating minor compaction (all files) 2024-11-22T19:23:10,238 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/A in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
2024-11-22T19:23:10,238 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f32b34f86dc94f7ab6f769b58de76a50, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/fa83f8c2395f4724a2ab9780fd100364, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f5bf4dfa9ef34001a57159813e6e2bf1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/b487763388514094ab2e9473e0869458] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=51.4 K 2024-11-22T19:23:10,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:10,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:10,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:10,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:10,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:10,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:10,239 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting e1b0624f68fd43bc945a060485673e44, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=419, earliestPutTs=1732303388484 2024-11-22T19:23:10,239 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f32b34f86dc94f7ab6f769b58de76a50, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=404, earliestPutTs=1732303388356 2024-11-22T19:23:10,240 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting bce6fa366b814b1e8e6a7369894fe66a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=443, earliestPutTs=1732303388833 2024-11-22T19:23:10,240 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa83f8c2395f4724a2ab9780fd100364, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=419, earliestPutTs=1732303388474 2024-11-22T19:23:10,240 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 6401aafa6d5d48b8973eaad8fedb85c2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1732303389475 2024-11-22T19:23:10,241 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5bf4dfa9ef34001a57159813e6e2bf1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=443, earliestPutTs=1732303388833 2024-11-22T19:23:10,243 DEBUG 
[RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b487763388514094ab2e9473e0869458, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1732303389475 2024-11-22T19:23:10,247 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/7f559d58a7954123bd2d17621426fd72 is 50, key is test_row_0/A:col10/1732303390234/Put/seqid=0 2024-11-22T19:23:10,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:10,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303450257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:10,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:10,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303450257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:10,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:10,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303450261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:10,283 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#B#compaction#337 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:10,284 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/5a01e96cc8434534933e0e6f146277d1 is 50, key is test_row_0/B:col10/1732303390093/Put/seqid=0 2024-11-22T19:23:10,291 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#A#compaction#338 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:10,292 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/acca12499bf0466f9206ac1fead624b0 is 50, key is test_row_0/A:col10/1732303390093/Put/seqid=0 2024-11-22T19:23:10,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742215_1391 (size=14741) 2024-11-22T19:23:10,317 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=481 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/7f559d58a7954123bd2d17621426fd72 2024-11-22T19:23:10,344 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/935032b3284d4a41ba47c011238c3887 is 50, key is test_row_0/B:col10/1732303390234/Put/seqid=0 2024-11-22T19:23:10,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742217_1393 (size=13391) 2024-11-22T19:23:10,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742216_1392 (size=13391) 2024-11-22T19:23:10,362 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/acca12499bf0466f9206ac1fead624b0 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/acca12499bf0466f9206ac1fead624b0 2024-11-22T19:23:10,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:10,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 270 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303450362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:10,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:10,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303450363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:10,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:10,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303450367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:10,370 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/A of 583533db2ec7fa9b81dbb4dd334629b0 into acca12499bf0466f9206ac1fead624b0(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
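Note on the repeated RegionTooBusyException warnings above: the region server is rejecting Mutate calls because the region's memstore is over its blocking limit (512.0 K here, deliberately tiny for TestAcidGuarantees) while flushes and compactions drain it. A minimal client-side sketch of absorbing that rejection with backoff follows; it is an assumption-laden illustration, not part of the test run. It assumes client retries are configured low enough that the exception actually reaches the caller (by default the HBase client retries it internally and may surface it wrapped in RetriesExhaustedWithDetailsException), and it reuses the table, family and qualifier names visible in the log rows (TestAcidGuarantees, family A, qualifier col10).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryingPutSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 1; attempt <= 5; attempt++) {
                    try {
                        table.put(put);
                        break; // write accepted
                    } catch (RegionTooBusyException e) {
                        // The region refused the write because its memstore is over the
                        // blocking limit; wait for the in-flight flush/compaction to drain
                        // it and try again with exponential backoff.
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;
                    }
                }
            }
        }
    }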
2024-11-22T19:23:10,371 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:10,371 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/A, priority=12, startTime=1732303390226; duration=0sec 2024-11-22T19:23:10,371 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:10,371 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:A 2024-11-22T19:23:10,371 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:23:10,372 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50158 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:23:10,373 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/C is initiating minor compaction (all files) 2024-11-22T19:23:10,373 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/C in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:10,373 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/d7cebb95964743ccb9356c161efb2029, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/7e0b0dd57f404d8b8f4577cebcbcb6e3, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/097bcd9cb10c41f4bf9372d3ee7be4e1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/aeda6ca8144141188ea097ec250a8514] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=49.0 K 2024-11-22T19:23:10,374 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7cebb95964743ccb9356c161efb2029, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=404, earliestPutTs=1732303388356 2024-11-22T19:23:10,374 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e0b0dd57f404d8b8f4577cebcbcb6e3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=419, earliestPutTs=1732303388484 2024-11-22T19:23:10,374 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 097bcd9cb10c41f4bf9372d3ee7be4e1, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=443, earliestPutTs=1732303388833 2024-11-22T19:23:10,375 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting aeda6ca8144141188ea097ec250a8514, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1732303389475 2024-11-22T19:23:10,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742218_1394 (size=12301) 2024-11-22T19:23:10,381 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=481 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/935032b3284d4a41ba47c011238c3887 2024-11-22T19:23:10,395 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#C#compaction#340 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:10,395 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/c99e507f9ed2406cbadbffe8933a763c is 50, key is test_row_0/C:col10/1732303390093/Put/seqid=0 2024-11-22T19:23:10,399 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/8402aca896a642fe825f85832a1eff25 is 50, key is test_row_0/C:col10/1732303390234/Put/seqid=0 2024-11-22T19:23:10,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-22T19:23:10,404 INFO [Thread-1335 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-22T19:23:10,409 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:23:10,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-11-22T19:23:10,411 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:23:10,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-22T19:23:10,412 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:23:10,412 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:23:10,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742219_1395 (size=13391) 2024-11-22T19:23:10,428 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/c99e507f9ed2406cbadbffe8933a763c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/c99e507f9ed2406cbadbffe8933a763c 2024-11-22T19:23:10,434 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/C of 583533db2ec7fa9b81dbb4dd334629b0 into c99e507f9ed2406cbadbffe8933a763c(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:23:10,434 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:10,434 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/C, priority=12, startTime=1732303390226; duration=0sec 2024-11-22T19:23:10,435 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:10,435 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:C 2024-11-22T19:23:10,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742220_1396 (size=12301) 2024-11-22T19:23:10,441 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=481 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/8402aca896a642fe825f85832a1eff25 2024-11-22T19:23:10,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/7f559d58a7954123bd2d17621426fd72 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/7f559d58a7954123bd2d17621426fd72 2024-11-22T19:23:10,451 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/7f559d58a7954123bd2d17621426fd72, entries=200, sequenceid=481, filesize=14.4 K 2024-11-22T19:23:10,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/935032b3284d4a41ba47c011238c3887 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/935032b3284d4a41ba47c011238c3887 2024-11-22T19:23:10,467 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/935032b3284d4a41ba47c011238c3887, entries=150, sequenceid=481, filesize=12.0 K 2024-11-22T19:23:10,468 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/8402aca896a642fe825f85832a1eff25 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/8402aca896a642fe825f85832a1eff25 2024-11-22T19:23:10,472 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/8402aca896a642fe825f85832a1eff25, entries=150, sequenceid=481, filesize=12.0 K 2024-11-22T19:23:10,473 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 583533db2ec7fa9b81dbb4dd334629b0 in 236ms, sequenceid=481, compaction requested=false 2024-11-22T19:23:10,473 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:10,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-22T19:23:10,568 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:10,569 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-22T19:23:10,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:10,569 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-22T19:23:10,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:10,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
as already flushing 2024-11-22T19:23:10,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:10,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:10,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:10,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:10,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:10,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:10,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/aa2f1d12b5d24f1b9bc11e16ff2814d0 is 50, key is test_row_0/A:col10/1732303390569/Put/seqid=0 2024-11-22T19:23:10,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742221_1397 (size=12301) 2024-11-22T19:23:10,596 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=497 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/aa2f1d12b5d24f1b9bc11e16ff2814d0 2024-11-22T19:23:10,608 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:10,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303450604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:10,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/28c4f5aa58864f31b726117eb32210e0 is 50, key is test_row_0/B:col10/1732303390569/Put/seqid=0 2024-11-22T19:23:10,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:10,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303450606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:10,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:10,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303450607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:10,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742222_1398 (size=12301) 2024-11-22T19:23:10,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-22T19:23:10,715 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:10,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:10,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303450713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:10,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303450712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:10,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:10,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303450713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:10,768 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/5a01e96cc8434534933e0e6f146277d1 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/5a01e96cc8434534933e0e6f146277d1 2024-11-22T19:23:10,774 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/B of 583533db2ec7fa9b81dbb4dd334629b0 into 5a01e96cc8434534933e0e6f146277d1(size=13.1 K), total size for store is 25.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:23:10,774 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:10,774 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/B, priority=12, startTime=1732303390226; duration=0sec 2024-11-22T19:23:10,774 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:10,774 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:B 2024-11-22T19:23:10,919 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:10,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303450917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:10,919 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:10,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 281 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303450917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:10,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:10,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 284 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303450918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:11,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-22T19:23:11,038 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=497 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/28c4f5aa58864f31b726117eb32210e0 2024-11-22T19:23:11,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/4e30f4346fbf40f7b5a4df4c108e3824 is 50, key is test_row_0/C:col10/1732303390569/Put/seqid=0 2024-11-22T19:23:11,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742223_1399 (size=12301) 2024-11-22T19:23:11,080 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=497 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/4e30f4346fbf40f7b5a4df4c108e3824 2024-11-22T19:23:11,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/aa2f1d12b5d24f1b9bc11e16ff2814d0 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/aa2f1d12b5d24f1b9bc11e16ff2814d0 2024-11-22T19:23:11,096 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/aa2f1d12b5d24f1b9bc11e16ff2814d0, entries=150, sequenceid=497, filesize=12.0 K 2024-11-22T19:23:11,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/28c4f5aa58864f31b726117eb32210e0 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/28c4f5aa58864f31b726117eb32210e0 2024-11-22T19:23:11,110 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/28c4f5aa58864f31b726117eb32210e0, entries=150, sequenceid=497, filesize=12.0 K 2024-11-22T19:23:11,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/4e30f4346fbf40f7b5a4df4c108e3824 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/4e30f4346fbf40f7b5a4df4c108e3824 2024-11-22T19:23:11,116 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/4e30f4346fbf40f7b5a4df4c108e3824, entries=150, sequenceid=497, filesize=12.0 K 2024-11-22T19:23:11,118 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 583533db2ec7fa9b81dbb4dd334629b0 in 548ms, sequenceid=497, compaction requested=true 2024-11-22T19:23:11,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:11,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
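The pid=89/pid=90 entries trace a client-requested flush: the master stores a FlushTableProcedure, which spawns a FlushRegionProcedure that the region server executes via FlushRegionCallable before reporting back "Remote procedure done". A short sketch of issuing that request from the client side is below; it assumes a reachable cluster configuration on the classpath and is illustrative only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Asks the master to flush the table; the master fans the work out to
                // per-region flush procedures on the region servers, as in the
                // pid=89 (FlushTableProcedure) / pid=90 (FlushRegionProcedure) entries above.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }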
2024-11-22T19:23:11,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-11-22T19:23:11,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-11-22T19:23:11,120 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-22T19:23:11,120 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 707 msec 2024-11-22T19:23:11,123 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 712 msec 2024-11-22T19:23:11,224 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-22T19:23:11,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:11,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:11,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:11,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:11,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:11,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:11,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:11,238 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/3a49b2ade6cf4bd9b787376cec4682d7 is 50, key is test_row_0/A:col10/1732303391222/Put/seqid=0 2024-11-22T19:23:11,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:11,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 289 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303451241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:11,245 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:11,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303451242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:11,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:11,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 287 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303451243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:11,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742224_1400 (size=12301) 2024-11-22T19:23:11,266 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=521 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/3a49b2ade6cf4bd9b787376cec4682d7 2024-11-22T19:23:11,277 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/76cfbad4c7024e9a892e553ee01b3d5a is 50, key is test_row_0/B:col10/1732303391222/Put/seqid=0 2024-11-22T19:23:11,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742225_1401 (size=12301) 2024-11-22T19:23:11,325 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=521 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/76cfbad4c7024e9a892e553ee01b3d5a 2024-11-22T19:23:11,341 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/77ee3fbb257049adbc6727057778b29d is 50, key is test_row_0/C:col10/1732303391222/Put/seqid=0 2024-11-22T19:23:11,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:11,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 291 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303451345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:11,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:11,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303451346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:11,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:11,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 289 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303451348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:11,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742226_1402 (size=12301) 2024-11-22T19:23:11,373 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=521 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/77ee3fbb257049adbc6727057778b29d 2024-11-22T19:23:11,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/3a49b2ade6cf4bd9b787376cec4682d7 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/3a49b2ade6cf4bd9b787376cec4682d7 2024-11-22T19:23:11,394 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/3a49b2ade6cf4bd9b787376cec4682d7, entries=150, sequenceid=521, filesize=12.0 K 2024-11-22T19:23:11,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/76cfbad4c7024e9a892e553ee01b3d5a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/76cfbad4c7024e9a892e553ee01b3d5a 2024-11-22T19:23:11,400 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/76cfbad4c7024e9a892e553ee01b3d5a, entries=150, sequenceid=521, filesize=12.0 K 2024-11-22T19:23:11,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/77ee3fbb257049adbc6727057778b29d as 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/77ee3fbb257049adbc6727057778b29d 2024-11-22T19:23:11,407 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/77ee3fbb257049adbc6727057778b29d, entries=150, sequenceid=521, filesize=12.0 K 2024-11-22T19:23:11,408 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 583533db2ec7fa9b81dbb4dd334629b0 in 184ms, sequenceid=521, compaction requested=true 2024-11-22T19:23:11,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:11,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:23:11,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:11,408 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:23:11,408 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:23:11,409 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50294 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:23:11,409 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52734 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:23:11,410 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/B is initiating minor compaction (all files) 2024-11-22T19:23:11,410 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/A is initiating minor compaction (all files) 2024-11-22T19:23:11,410 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/A in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:11,410 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/B in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
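[Editorial sketch, not part of the captured log.] The entries above show the write path this test exercises: puts are rejected with RegionTooBusyException once the region's memstore passes its blocking limit (reported here as 512.0 K), the MemStoreFlusher drains the memstore into new store files for families A/B/C, and the ExploringCompactionPolicy then selects those files for minor compaction. As a hedged illustration only, the standalone client below performs the same kind of single-cell put against the TestAcidGuarantees table; the stock HBase client retries RegionTooBusyException internally, which is what the RpcRetryingCallerImpl entry later in this excerpt reports (tries=8 of retries=16). The class name BusyRegionWriter, the literal cell value, and the explicit retry setting are illustrative assumptions; the imports, configuration key, and client calls are the standard HBase client API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch: a client writing the same row/column shape seen in this log
// (row test_row_0, family A, qualifier col10) against a running cluster.
public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Client-side retry budget; the retry caller in this log reports retries=16.
    conf.setInt("hbase.client.retries.number", 16);
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // put() goes through the client's retrying RPC caller, which retries
      // RegionTooBusyException (not a DoNotRetryIOException) until the retry
      // budget or the operation timeout is exhausted.
      table.put(put);
    }
  }
}

On the server side, the 512.0 K figure in these entries is the region's blocking memstore size; in stock HBase that threshold is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and it has evidently been tuned far below the defaults for this test so that flushes, update blocking, and compactions all trigger quickly.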
2024-11-22T19:23:11,410 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/acca12499bf0466f9206ac1fead624b0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/7f559d58a7954123bd2d17621426fd72, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/aa2f1d12b5d24f1b9bc11e16ff2814d0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/3a49b2ade6cf4bd9b787376cec4682d7] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=51.5 K 2024-11-22T19:23:11,410 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/5a01e96cc8434534933e0e6f146277d1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/935032b3284d4a41ba47c011238c3887, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/28c4f5aa58864f31b726117eb32210e0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/76cfbad4c7024e9a892e553ee01b3d5a] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=49.1 K 2024-11-22T19:23:11,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:23:11,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:11,411 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting acca12499bf0466f9206ac1fead624b0, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1732303389475 2024-11-22T19:23:11,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:23:11,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:11,411 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a01e96cc8434534933e0e6f146277d1, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1732303389475 2024-11-22T19:23:11,411 DEBUG 
[RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f559d58a7954123bd2d17621426fd72, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=481, earliestPutTs=1732303390118 2024-11-22T19:23:11,412 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 935032b3284d4a41ba47c011238c3887, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=481, earliestPutTs=1732303390122 2024-11-22T19:23:11,412 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa2f1d12b5d24f1b9bc11e16ff2814d0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=497, earliestPutTs=1732303390251 2024-11-22T19:23:11,412 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 28c4f5aa58864f31b726117eb32210e0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=497, earliestPutTs=1732303390251 2024-11-22T19:23:11,412 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a49b2ade6cf4bd9b787376cec4682d7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=521, earliestPutTs=1732303390602 2024-11-22T19:23:11,412 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 76cfbad4c7024e9a892e553ee01b3d5a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=521, earliestPutTs=1732303390602 2024-11-22T19:23:11,438 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#A#compaction#348 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:11,439 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/7423ce8cef984c4284e8f08c73d8c3a3 is 50, key is test_row_0/A:col10/1732303391222/Put/seqid=0 2024-11-22T19:23:11,444 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#B#compaction#349 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:11,445 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/60b40a3c9a61483db2139562d3b21b2e is 50, key is test_row_0/B:col10/1732303391222/Put/seqid=0 2024-11-22T19:23:11,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742227_1403 (size=13527) 2024-11-22T19:23:11,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-22T19:23:11,515 INFO [Thread-1335 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-11-22T19:23:11,517 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:23:11,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees 2024-11-22T19:23:11,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-22T19:23:11,519 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:23:11,519 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:23:11,520 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:23:11,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742228_1404 (size=13527) 2024-11-22T19:23:11,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:11,553 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-22T19:23:11,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:11,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:11,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:11,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:11,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:11,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:11,570 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/00c548f3b0b14ac3af081743fe0e4983 is 50, key is test_row_0/A:col10/1732303391240/Put/seqid=0 2024-11-22T19:23:11,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:11,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 297 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303451591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:11,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:11,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:11,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303451592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:11,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 300 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303451592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:11,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742229_1405 (size=12301) 2024-11-22T19:23:11,605 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=535 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/00c548f3b0b14ac3af081743fe0e4983 2024-11-22T19:23:11,615 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/489f27bed39f467197005101ef357e9f is 50, key is test_row_0/B:col10/1732303391240/Put/seqid=0 2024-11-22T19:23:11,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-22T19:23:11,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742230_1406 (size=12301) 2024-11-22T19:23:11,643 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=535 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/489f27bed39f467197005101ef357e9f 2024-11-22T19:23:11,659 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/84b27e77728d49f489cbf6ebe63fe130 is 50, key is test_row_0/C:col10/1732303391240/Put/seqid=0 2024-11-22T19:23:11,671 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:11,672 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-22T19:23:11,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:11,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:11,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:11,672 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:11,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:11,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:11,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:11,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1732303451678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:11,681 DEBUG [Thread-1333 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18231 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., 
hostname=a307a1377457,35917,1732303314657, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:23:11,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742231_1407 (size=12301) 2024-11-22T19:23:11,692 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=535 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/84b27e77728d49f489cbf6ebe63fe130 2024-11-22T19:23:11,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:11,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 299 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303451697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:11,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:11,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 302 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303451697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:11,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:11,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303451697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:11,701 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/00c548f3b0b14ac3af081743fe0e4983 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/00c548f3b0b14ac3af081743fe0e4983 2024-11-22T19:23:11,709 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/00c548f3b0b14ac3af081743fe0e4983, entries=150, sequenceid=535, filesize=12.0 K 2024-11-22T19:23:11,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/489f27bed39f467197005101ef357e9f as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/489f27bed39f467197005101ef357e9f 2024-11-22T19:23:11,717 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/489f27bed39f467197005101ef357e9f, entries=150, sequenceid=535, filesize=12.0 K 2024-11-22T19:23:11,718 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/84b27e77728d49f489cbf6ebe63fe130 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/84b27e77728d49f489cbf6ebe63fe130 2024-11-22T19:23:11,724 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/84b27e77728d49f489cbf6ebe63fe130, entries=150, sequenceid=535, filesize=12.0 K 2024-11-22T19:23:11,725 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 
KB/130530 for 583533db2ec7fa9b81dbb4dd334629b0 in 172ms, sequenceid=535, compaction requested=true 2024-11-22T19:23:11,725 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:11,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:A, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:23:11,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T19:23:11,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:B, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:23:11,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-22T19:23:11,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:23:11,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-22T19:23:11,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:11,739 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-22T19:23:11,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:11,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:11,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:11,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:11,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:11,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:11,765 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/a55dfafa9b844a1fbb122cb924f9e8c1 is 50, key is test_row_0/A:col10/1732303391736/Put/seqid=0 2024-11-22T19:23:11,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:11,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55852 deadline: 1732303451782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:11,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742232_1408 (size=12301) 2024-11-22T19:23:11,789 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=558 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/a55dfafa9b844a1fbb122cb924f9e8c1 2024-11-22T19:23:11,815 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/2a4aada8ad4848768b264ad774189bce is 50, key is test_row_0/B:col10/1732303391736/Put/seqid=0 2024-11-22T19:23:11,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-22T19:23:11,825 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:11,825 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-22T19:23:11,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:11,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
as already flushing 2024-11-22T19:23:11,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:11,826 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:11,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:11,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:11,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742233_1409 (size=12301) 2024-11-22T19:23:11,845 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=558 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/2a4aada8ad4848768b264ad774189bce 2024-11-22T19:23:11,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/be7d63bec44b45d1bb15987fd09dbf1d is 50, key is test_row_0/C:col10/1732303391736/Put/seqid=0 2024-11-22T19:23:11,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:11,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55852 deadline: 1732303451888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:11,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742234_1410 (size=12301) 2024-11-22T19:23:11,894 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=558 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/be7d63bec44b45d1bb15987fd09dbf1d 2024-11-22T19:23:11,899 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/7423ce8cef984c4284e8f08c73d8c3a3 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/7423ce8cef984c4284e8f08c73d8c3a3 2024-11-22T19:23:11,903 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/a55dfafa9b844a1fbb122cb924f9e8c1 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/a55dfafa9b844a1fbb122cb924f9e8c1 2024-11-22T19:23:11,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:11,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 301 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303451901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:11,905 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:11,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303451902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:11,905 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:11,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 304 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303451902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:11,907 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/A of 583533db2ec7fa9b81dbb4dd334629b0 into 7423ce8cef984c4284e8f08c73d8c3a3(size=13.2 K), total size for store is 25.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:23:11,907 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:11,907 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/A, priority=12, startTime=1732303391408; duration=0sec 2024-11-22T19:23:11,907 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-22T19:23:11,907 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:A 2024-11-22T19:23:11,907 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:A 2024-11-22T19:23:11,908 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-22T19:23:11,911 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62595 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-22T19:23:11,912 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/C is initiating minor compaction (all files) 2024-11-22T19:23:11,912 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/C in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:11,912 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/a55dfafa9b844a1fbb122cb924f9e8c1, entries=150, sequenceid=558, filesize=12.0 K 2024-11-22T19:23:11,912 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/c99e507f9ed2406cbadbffe8933a763c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/8402aca896a642fe825f85832a1eff25, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/4e30f4346fbf40f7b5a4df4c108e3824, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/77ee3fbb257049adbc6727057778b29d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/84b27e77728d49f489cbf6ebe63fe130] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=61.1 K 2024-11-22T19:23:11,912 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting c99e507f9ed2406cbadbffe8933a763c, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1732303389475 2024-11-22T19:23:11,913 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8402aca896a642fe825f85832a1eff25, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=481, earliestPutTs=1732303390122 2024-11-22T19:23:11,913 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e30f4346fbf40f7b5a4df4c108e3824, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=497, earliestPutTs=1732303390251 2024-11-22T19:23:11,914 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 77ee3fbb257049adbc6727057778b29d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=521, earliestPutTs=1732303390602 2024-11-22T19:23:11,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/2a4aada8ad4848768b264ad774189bce as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/2a4aada8ad4848768b264ad774189bce 2024-11-22T19:23:11,915 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84b27e77728d49f489cbf6ebe63fe130, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=535, earliestPutTs=1732303391235 2024-11-22T19:23:11,920 INFO 
[MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/2a4aada8ad4848768b264ad774189bce, entries=150, sequenceid=558, filesize=12.0 K 2024-11-22T19:23:11,922 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/be7d63bec44b45d1bb15987fd09dbf1d as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/be7d63bec44b45d1bb15987fd09dbf1d 2024-11-22T19:23:11,928 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/60b40a3c9a61483db2139562d3b21b2e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/60b40a3c9a61483db2139562d3b21b2e 2024-11-22T19:23:11,929 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/be7d63bec44b45d1bb15987fd09dbf1d, entries=150, sequenceid=558, filesize=12.0 K 2024-11-22T19:23:11,930 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 583533db2ec7fa9b81dbb4dd334629b0 in 191ms, sequenceid=558, compaction requested=true 2024-11-22T19:23:11,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:11,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:A, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:23:11,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-22T19:23:11,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:B, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:23:11,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=5), splitQueue=0 2024-11-22T19:23:11,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:23:11,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=6), splitQueue=0 2024-11-22T19:23:11,936 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/B of 583533db2ec7fa9b81dbb4dd334629b0 into 
60b40a3c9a61483db2139562d3b21b2e(size=13.2 K), total size for store is 37.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:23:11,936 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:11,936 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/B, priority=12, startTime=1732303391408; duration=0sec 2024-11-22T19:23:11,936 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=6), splitQueue=0 2024-11-22T19:23:11,936 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:B 2024-11-22T19:23:11,936 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:B 2024-11-22T19:23:11,937 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 5 compacting, 1 eligible, 16 blocking 2024-11-22T19:23:11,937 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-22T19:23:11,937 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-22T19:23:11,937 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. because compaction request was cancelled 2024-11-22T19:23:11,937 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:C 2024-11-22T19:23:11,937 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:B 2024-11-22T19:23:11,937 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 5 compacting, 1 eligible, 16 blocking 2024-11-22T19:23:11,938 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-22T19:23:11,938 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-22T19:23:11,938 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
because compaction request was cancelled 2024-11-22T19:23:11,938 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:C 2024-11-22T19:23:11,938 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:11,939 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38129 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:11,939 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/B is initiating minor compaction (all files) 2024-11-22T19:23:11,939 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/B in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:11,940 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/60b40a3c9a61483db2139562d3b21b2e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/489f27bed39f467197005101ef357e9f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/2a4aada8ad4848768b264ad774189bce] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=37.2 K 2024-11-22T19:23:11,941 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 60b40a3c9a61483db2139562d3b21b2e, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=521, earliestPutTs=1732303390602 2024-11-22T19:23:11,941 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 489f27bed39f467197005101ef357e9f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=535, earliestPutTs=1732303391235 2024-11-22T19:23:11,941 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a4aada8ad4848768b264ad774189bce, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=558, earliestPutTs=1732303391584 2024-11-22T19:23:11,948 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#C#compaction#356 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:11,950 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/ae525dbeb6b2482c8dccc605268893f1 is 50, key is test_row_0/C:col10/1732303391240/Put/seqid=0 2024-11-22T19:23:11,959 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#B#compaction#357 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:11,962 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/e507dab0189741ba97cd010489b5c33c is 50, key is test_row_0/B:col10/1732303391736/Put/seqid=0 2024-11-22T19:23:11,979 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:11,982 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-22T19:23:11,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:11,983 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-22T19:23:11,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:11,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:11,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:11,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:11,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:11,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:12,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742235_1411 (size=13561) 2024-11-22T19:23:12,026 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/dca9e72a5b244287ab12bda2f663128a is 50, key is test_row_0/A:col10/1732303391752/Put/seqid=0 2024-11-22T19:23:12,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742236_1412 (size=13629) 2024-11-22T19:23:12,040 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/e507dab0189741ba97cd010489b5c33c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e507dab0189741ba97cd010489b5c33c 2024-11-22T19:23:12,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742237_1413 (size=12301) 2024-11-22T19:23:12,053 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=573 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/dca9e72a5b244287ab12bda2f663128a 2024-11-22T19:23:12,057 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/B of 583533db2ec7fa9b81dbb4dd334629b0 into e507dab0189741ba97cd010489b5c33c(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:23:12,057 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:12,057 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/B, priority=13, startTime=1732303391725; duration=0sec 2024-11-22T19:23:12,057 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-22T19:23:12,057 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:B 2024-11-22T19:23:12,057 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:12,058 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38129 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:12,058 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/A is initiating minor compaction (all files) 2024-11-22T19:23:12,058 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/A in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:12,059 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/7423ce8cef984c4284e8f08c73d8c3a3, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/00c548f3b0b14ac3af081743fe0e4983, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/a55dfafa9b844a1fbb122cb924f9e8c1] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=37.2 K 2024-11-22T19:23:12,060 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 7423ce8cef984c4284e8f08c73d8c3a3, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=521, earliestPutTs=1732303390602 2024-11-22T19:23:12,061 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 00c548f3b0b14ac3af081743fe0e4983, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=535, earliestPutTs=1732303391235 2024-11-22T19:23:12,063 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting a55dfafa9b844a1fbb122cb924f9e8c1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=558, earliestPutTs=1732303391584 2024-11-22T19:23:12,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/88ecbace09dc4755be19ded4902cdc3b is 50, key is test_row_0/B:col10/1732303391752/Put/seqid=0 2024-11-22T19:23:12,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742238_1414 (size=12301) 2024-11-22T19:23:12,085 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=573 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/88ecbace09dc4755be19ded4902cdc3b 2024-11-22T19:23:12,093 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#A#compaction#360 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:12,093 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/9d9ed47c42b345cabd126346750bf2fa is 50, key is test_row_0/A:col10/1732303391736/Put/seqid=0 2024-11-22T19:23:12,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:12,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:12,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/fd6e3195719842b2966905ea23b791f5 is 50, key is test_row_0/C:col10/1732303391752/Put/seqid=0 2024-11-22T19:23:12,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-22T19:23:12,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742239_1415 (size=13629) 2024-11-22T19:23:12,149 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/9d9ed47c42b345cabd126346750bf2fa as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/9d9ed47c42b345cabd126346750bf2fa 2024-11-22T19:23:12,156 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/A of 583533db2ec7fa9b81dbb4dd334629b0 into 9d9ed47c42b345cabd126346750bf2fa(size=13.3 K), total size for store is 13.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:23:12,156 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:12,156 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/A, priority=13, startTime=1732303391931; duration=0sec 2024-11-22T19:23:12,156 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T19:23:12,156 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:A 2024-11-22T19:23:12,156 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:B 2024-11-22T19:23:12,156 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:A 2024-11-22T19:23:12,156 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-22T19:23:12,157 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-22T19:23:12,157 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-22T19:23:12,157 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. because compaction request was cancelled 2024-11-22T19:23:12,157 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:B 2024-11-22T19:23:12,157 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-22T19:23:12,157 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-22T19:23:12,157 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-22T19:23:12,157 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
because compaction request was cancelled 2024-11-22T19:23:12,158 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:A 2024-11-22T19:23:12,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742240_1416 (size=12301) 2024-11-22T19:23:12,168 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=573 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/fd6e3195719842b2966905ea23b791f5 2024-11-22T19:23:12,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/dca9e72a5b244287ab12bda2f663128a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/dca9e72a5b244287ab12bda2f663128a 2024-11-22T19:23:12,182 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/dca9e72a5b244287ab12bda2f663128a, entries=150, sequenceid=573, filesize=12.0 K 2024-11-22T19:23:12,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/88ecbace09dc4755be19ded4902cdc3b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/88ecbace09dc4755be19ded4902cdc3b 2024-11-22T19:23:12,188 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/88ecbace09dc4755be19ded4902cdc3b, entries=150, sequenceid=573, filesize=12.0 K 2024-11-22T19:23:12,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/fd6e3195719842b2966905ea23b791f5 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/fd6e3195719842b2966905ea23b791f5 2024-11-22T19:23:12,195 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/fd6e3195719842b2966905ea23b791f5, entries=150, sequenceid=573, filesize=12.0 K 2024-11-22T19:23:12,196 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 583533db2ec7fa9b81dbb4dd334629b0 in 213ms, sequenceid=573, compaction requested=false 2024-11-22T19:23:12,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:12,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:12,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=92 2024-11-22T19:23:12,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=92 2024-11-22T19:23:12,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:12,199 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-22T19:23:12,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:12,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:12,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:12,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:12,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:12,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:12,201 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-11-22T19:23:12,201 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 678 msec 2024-11-22T19:23:12,204 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees in 685 msec 2024-11-22T19:23:12,209 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/3320e785854446ccb35cbb8a5d1e2805 is 50, key is test_row_0/A:col10/1732303392184/Put/seqid=0 2024-11-22T19:23:12,221 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:12,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303452215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:12,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:12,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 305 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303452219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:12,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:12,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55852 deadline: 1732303452220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:12,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:12,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 308 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303452221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:12,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742241_1417 (size=12301) 2024-11-22T19:23:12,229 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=599 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/3320e785854446ccb35cbb8a5d1e2805 2024-11-22T19:23:12,248 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/6c992448244341018c6df6a1a342d1cd is 50, key is test_row_0/B:col10/1732303392184/Put/seqid=0 2024-11-22T19:23:12,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742242_1418 (size=12301) 2024-11-22T19:23:12,293 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=599 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/6c992448244341018c6df6a1a342d1cd 2024-11-22T19:23:12,317 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/3f6671608f3f4c14a6516289f62005e4 is 50, key is test_row_0/C:col10/1732303392184/Put/seqid=0 2024-11-22T19:23:12,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:12,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303452322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:12,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:12,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 307 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303452323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:12,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:12,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 310 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303452326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:12,330 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:12,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55852 deadline: 1732303452327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:12,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742243_1419 (size=12301) 2024-11-22T19:23:12,353 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=599 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/3f6671608f3f4c14a6516289f62005e4 2024-11-22T19:23:12,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/3320e785854446ccb35cbb8a5d1e2805 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/3320e785854446ccb35cbb8a5d1e2805 2024-11-22T19:23:12,366 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/3320e785854446ccb35cbb8a5d1e2805, entries=150, sequenceid=599, filesize=12.0 K 2024-11-22T19:23:12,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/6c992448244341018c6df6a1a342d1cd as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/6c992448244341018c6df6a1a342d1cd 2024-11-22T19:23:12,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/6c992448244341018c6df6a1a342d1cd, entries=150, sequenceid=599, filesize=12.0 K 2024-11-22T19:23:12,375 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/3f6671608f3f4c14a6516289f62005e4 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/3f6671608f3f4c14a6516289f62005e4 2024-11-22T19:23:12,381 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/3f6671608f3f4c14a6516289f62005e4, entries=150, sequenceid=599, filesize=12.0 K 2024-11-22T19:23:12,383 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 583533db2ec7fa9b81dbb4dd334629b0 in 184ms, sequenceid=599, compaction requested=true 2024-11-22T19:23:12,383 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:12,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:23:12,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:12,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:23:12,383 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:12,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:12,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:23:12,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T19:23:12,385 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38231 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:12,385 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/A is initiating minor compaction (all files) 2024-11-22T19:23:12,385 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/A in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
2024-11-22T19:23:12,385 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/9d9ed47c42b345cabd126346750bf2fa, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/dca9e72a5b244287ab12bda2f663128a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/3320e785854446ccb35cbb8a5d1e2805] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=37.3 K 2024-11-22T19:23:12,385 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d9ed47c42b345cabd126346750bf2fa, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=558, earliestPutTs=1732303391584 2024-11-22T19:23:12,386 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting dca9e72a5b244287ab12bda2f663128a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=573, earliestPutTs=1732303391752 2024-11-22T19:23:12,386 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 3320e785854446ccb35cbb8a5d1e2805, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=599, earliestPutTs=1732303392184 2024-11-22T19:23:12,394 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#A#compaction#365 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:12,395 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/740b60bde58c4c828282ca88c96845ac is 50, key is test_row_0/A:col10/1732303392184/Put/seqid=0 2024-11-22T19:23:12,417 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/ae525dbeb6b2482c8dccc605268893f1 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/ae525dbeb6b2482c8dccc605268893f1 2024-11-22T19:23:12,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742244_1420 (size=13731) 2024-11-22T19:23:12,426 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/C of 583533db2ec7fa9b81dbb4dd334629b0 into ae525dbeb6b2482c8dccc605268893f1(size=13.2 K), total size for store is 49.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:23:12,426 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:12,426 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/C, priority=11, startTime=1732303391725; duration=0sec 2024-11-22T19:23:12,426 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T19:23:12,426 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:C 2024-11-22T19:23:12,426 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:C 2024-11-22T19:23:12,426 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:23:12,429 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50464 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:23:12,429 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/C is initiating minor compaction (all files) 2024-11-22T19:23:12,429 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/C in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
2024-11-22T19:23:12,429 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/ae525dbeb6b2482c8dccc605268893f1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/be7d63bec44b45d1bb15987fd09dbf1d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/fd6e3195719842b2966905ea23b791f5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/3f6671608f3f4c14a6516289f62005e4] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=49.3 K 2024-11-22T19:23:12,430 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae525dbeb6b2482c8dccc605268893f1, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=535, earliestPutTs=1732303391235 2024-11-22T19:23:12,431 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting be7d63bec44b45d1bb15987fd09dbf1d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=558, earliestPutTs=1732303391584 2024-11-22T19:23:12,432 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/740b60bde58c4c828282ca88c96845ac as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/740b60bde58c4c828282ca88c96845ac 2024-11-22T19:23:12,432 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd6e3195719842b2966905ea23b791f5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=573, earliestPutTs=1732303391752 2024-11-22T19:23:12,433 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f6671608f3f4c14a6516289f62005e4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=599, earliestPutTs=1732303392184 2024-11-22T19:23:12,439 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/A of 583533db2ec7fa9b81dbb4dd334629b0 into 740b60bde58c4c828282ca88c96845ac(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:23:12,439 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:12,439 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/A, priority=13, startTime=1732303392383; duration=0sec 2024-11-22T19:23:12,440 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:12,440 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:A 2024-11-22T19:23:12,440 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:12,441 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38231 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:12,441 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/B is initiating minor compaction (all files) 2024-11-22T19:23:12,442 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/B in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:12,442 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e507dab0189741ba97cd010489b5c33c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/88ecbace09dc4755be19ded4902cdc3b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/6c992448244341018c6df6a1a342d1cd] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=37.3 K 2024-11-22T19:23:12,442 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting e507dab0189741ba97cd010489b5c33c, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=558, earliestPutTs=1732303391584 2024-11-22T19:23:12,443 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 88ecbace09dc4755be19ded4902cdc3b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=573, earliestPutTs=1732303391752 2024-11-22T19:23:12,444 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c992448244341018c6df6a1a342d1cd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=599, earliestPutTs=1732303392184 2024-11-22T19:23:12,458 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
583533db2ec7fa9b81dbb4dd334629b0#B#compaction#367 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:12,458 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#C#compaction#366 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:12,458 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/964a1b0aa7964735a9fe892b684bb3f1 is 50, key is test_row_0/B:col10/1732303392184/Put/seqid=0 2024-11-22T19:23:12,460 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/fecc9866a5cc49619dd11757325ee7c1 is 50, key is test_row_0/C:col10/1732303392184/Put/seqid=0 2024-11-22T19:23:12,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742245_1421 (size=13731) 2024-11-22T19:23:12,499 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/964a1b0aa7964735a9fe892b684bb3f1 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/964a1b0aa7964735a9fe892b684bb3f1 2024-11-22T19:23:12,506 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/B of 583533db2ec7fa9b81dbb4dd334629b0 into 964a1b0aa7964735a9fe892b684bb3f1(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:23:12,506 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:12,506 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/B, priority=13, startTime=1732303392383; duration=0sec 2024-11-22T19:23:12,506 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:12,506 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:B 2024-11-22T19:23:12,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742246_1422 (size=13697) 2024-11-22T19:23:12,524 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/fecc9866a5cc49619dd11757325ee7c1 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/fecc9866a5cc49619dd11757325ee7c1 2024-11-22T19:23:12,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:12,529 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-22T19:23:12,529 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:12,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:12,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:12,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:12,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:12,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:12,531 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/C of 583533db2ec7fa9b81dbb4dd334629b0 into fecc9866a5cc49619dd11757325ee7c1(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:23:12,531 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:12,531 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/C, priority=12, startTime=1732303392383; duration=0sec 2024-11-22T19:23:12,531 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:12,531 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:C 2024-11-22T19:23:12,538 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/18b5001883e747999aeee9bd3b8209d7 is 50, key is test_row_1/A:col10/1732303392528/Put/seqid=0 2024-11-22T19:23:12,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742247_1423 (size=9857) 2024-11-22T19:23:12,550 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=616 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/18b5001883e747999aeee9bd3b8209d7 2024-11-22T19:23:12,561 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/6d1f413107354aa3902354a297496c29 is 50, key is test_row_1/B:col10/1732303392528/Put/seqid=0 2024-11-22T19:23:12,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742248_1424 (size=9857) 2024-11-22T19:23:12,566 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=616 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/6d1f413107354aa3902354a297496c29 2024-11-22T19:23:12,585 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/445fb1e4fe1647b9ac555ed85a938b0f is 50, key is test_row_1/C:col10/1732303392528/Put/seqid=0 2024-11-22T19:23:12,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742249_1425 (size=9857) 2024-11-22T19:23:12,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-22T19:23:12,622 INFO [Thread-1335 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-11-22T19:23:12,622 
WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:12,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303452619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:12,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:12,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:12,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 316 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303452621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:12,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 316 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303452619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:12,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:12,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55852 deadline: 1732303452623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:12,626 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:23:12,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees 2024-11-22T19:23:12,628 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=93, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:23:12,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-22T19:23:12,628 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=93, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:23:12,629 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:23:12,726 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:12,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 284 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303452724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:12,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:12,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 318 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303452724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:12,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:12,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 318 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303452724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:12,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:12,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55852 deadline: 1732303452727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:12,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-22T19:23:12,781 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:12,782 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-22T19:23:12,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:12,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:12,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:12,782 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:23:12,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:12,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:12,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-22T19:23:12,930 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:12,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 320 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303452928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:12,930 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:12,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:12,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 286 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303452929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:12,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 320 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303452930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:12,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:12,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55852 deadline: 1732303452931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:12,935 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:12,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-22T19:23:12,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:12,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:12,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:12,936 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:12,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:12,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:12,998 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=616 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/445fb1e4fe1647b9ac555ed85a938b0f 2024-11-22T19:23:13,006 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/18b5001883e747999aeee9bd3b8209d7 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/18b5001883e747999aeee9bd3b8209d7 2024-11-22T19:23:13,011 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/18b5001883e747999aeee9bd3b8209d7, entries=100, sequenceid=616, filesize=9.6 K 2024-11-22T19:23:13,013 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/6d1f413107354aa3902354a297496c29 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/6d1f413107354aa3902354a297496c29 2024-11-22T19:23:13,020 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/6d1f413107354aa3902354a297496c29, entries=100, sequenceid=616, filesize=9.6 K 2024-11-22T19:23:13,022 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/445fb1e4fe1647b9ac555ed85a938b0f as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/445fb1e4fe1647b9ac555ed85a938b0f 2024-11-22T19:23:13,026 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/445fb1e4fe1647b9ac555ed85a938b0f, entries=100, sequenceid=616, filesize=9.6 K 2024-11-22T19:23:13,027 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush 
of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 583533db2ec7fa9b81dbb4dd334629b0 in 498ms, sequenceid=616, compaction requested=false 2024-11-22T19:23:13,027 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:13,088 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:13,089 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-22T19:23:13,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:13,089 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-22T19:23:13,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:13,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:13,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:13,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:13,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:13,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:13,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/df79c96ae84342c98d87c289f5f6e0df is 50, key is test_row_0/A:col10/1732303392619/Put/seqid=0 2024-11-22T19:23:13,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742250_1426 (size=12301) 2024-11-22T19:23:13,127 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=639 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/df79c96ae84342c98d87c289f5f6e0df 2024-11-22T19:23:13,139 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/fa46959e9cd74c9ca7dd241d196db708 is 50, key is test_row_0/B:col10/1732303392619/Put/seqid=0 2024-11-22T19:23:13,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742251_1427 (size=12301) 2024-11-22T19:23:13,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-22T19:23:13,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:13,235 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. as already flushing 2024-11-22T19:23:13,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:13,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55852 deadline: 1732303453242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:13,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:13,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 325 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303453243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:13,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:13,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 325 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303453244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:13,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:13,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 291 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303453245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:13,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:13,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55852 deadline: 1732303453347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:13,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:13,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 327 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303453347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:13,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:13,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 327 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303453348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:13,350 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:13,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 293 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303453348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:13,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:13,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55852 deadline: 1732303453550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:13,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:13,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 329 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303453551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:13,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:13,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 329 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303453551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:13,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:13,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 295 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303453552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:13,564 DEBUG [Thread-1342 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5a78bf6d to 127.0.0.1:57120 2024-11-22T19:23:13,564 DEBUG [Thread-1342 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:23:13,565 DEBUG [Thread-1340 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x131ceb8f to 127.0.0.1:57120 2024-11-22T19:23:13,565 DEBUG [Thread-1344 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x328852db to 127.0.0.1:57120 2024-11-22T19:23:13,565 DEBUG [Thread-1344 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:23:13,566 DEBUG [Thread-1340 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:23:13,567 DEBUG [Thread-1336 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5886c0f2 to 127.0.0.1:57120 2024-11-22T19:23:13,567 DEBUG [Thread-1336 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:23:13,568 DEBUG [Thread-1338 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x66e575aa to 127.0.0.1:57120 2024-11-22T19:23:13,569 DEBUG [Thread-1338 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:23:13,572 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=639 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/fa46959e9cd74c9ca7dd241d196db708 2024-11-22T19:23:13,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/3db0feac4dfd4d0c99eec41f278669df is 50, key is test_row_0/C:col10/1732303392619/Put/seqid=0 2024-11-22T19:23:13,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742252_1428 (size=12301) 
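The repeated RegionTooBusyException records above are the region server rejecting client Mutate calls because the memstore of region 583533db2ec7fa9b81dbb4dd334629b0 has hit its blocking limit (512.0 K in this run) while the flush for pid=94 is still in progress. For illustration only (not part of this log), a minimal client-side sketch in Java, assuming the standard HBase client API and a reachable cluster; the table, row, family and qualifier mirror the test, while the class name and retry policy are hypothetical. The 512 K limit itself is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier, which this test presumably lowers from their defaults.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TooBusyRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            // Hypothetical bounded retry: back off while the region is flushing.
            // Depending on client retry settings, the exception may instead arrive
            // wrapped in a RetriesExhaustedException after the client's own retries.
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break;
                } catch (RegionTooBusyException e) {
                    Thread.sleep(100L * attempt); // simple linear backoff
                }
            }
        }
    }
}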
2024-11-22T19:23:13,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-22T19:23:13,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:13,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55852 deadline: 1732303453853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:13,854 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:13,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 331 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55804 deadline: 1732303453854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:13,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:13,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 331 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55878 deadline: 1732303453854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:13,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:13,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 297 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55844 deadline: 1732303453854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:13,991 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=639 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/3db0feac4dfd4d0c99eec41f278669df 2024-11-22T19:23:13,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/df79c96ae84342c98d87c289f5f6e0df as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/df79c96ae84342c98d87c289f5f6e0df 2024-11-22T19:23:13,999 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/df79c96ae84342c98d87c289f5f6e0df, entries=150, sequenceid=639, filesize=12.0 K 2024-11-22T19:23:14,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/fa46959e9cd74c9ca7dd241d196db708 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/fa46959e9cd74c9ca7dd241d196db708 2024-11-22T19:23:14,005 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/fa46959e9cd74c9ca7dd241d196db708, entries=150, sequenceid=639, filesize=12.0 K 2024-11-22T19:23:14,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/3db0feac4dfd4d0c99eec41f278669df as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/3db0feac4dfd4d0c99eec41f278669df 2024-11-22T19:23:14,010 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/3db0feac4dfd4d0c99eec41f278669df, entries=150, sequenceid=639, filesize=12.0 K 2024-11-22T19:23:14,011 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 583533db2ec7fa9b81dbb4dd334629b0 in 922ms, sequenceid=639, compaction requested=true 2024-11-22T19:23:14,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:14,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
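At this point the flush has completed on the region server and the flush callable closes its region operation; the records that follow show the remote procedure (pid=94) reporting success and the master finishing the table-level flush (pid=93), which the client later observes as "Operation: FLUSH ... procId: 93 completed". For reference only, a minimal sketch, assuming the standard HBase Admin API (class name hypothetical), of how such a table flush is requested from the client side:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Requests a table flush; the master runs a FlushTableProcedure and
            // dispatches FlushRegionProcedure subtasks to the owning region servers,
            // matching the pid=93 / pid=94 records around this point in the log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}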
2024-11-22T19:23:14,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=94 2024-11-22T19:23:14,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=94 2024-11-22T19:23:14,013 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-11-22T19:23:14,013 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3830 sec 2024-11-22T19:23:14,015 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees in 1.3880 sec 2024-11-22T19:23:14,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:14,357 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-22T19:23:14,357 DEBUG [Thread-1327 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72aa9ee5 to 127.0.0.1:57120 2024-11-22T19:23:14,358 DEBUG [Thread-1327 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:23:14,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:14,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:14,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:14,359 DEBUG [Thread-1325 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x669e1999 to 127.0.0.1:57120 2024-11-22T19:23:14,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:14,359 DEBUG [Thread-1325 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:23:14,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:14,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:14,363 DEBUG [Thread-1331 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4dfb20f6 to 127.0.0.1:57120 2024-11-22T19:23:14,363 DEBUG [Thread-1329 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4ec09297 to 127.0.0.1:57120 2024-11-22T19:23:14,363 DEBUG [Thread-1331 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:23:14,363 DEBUG [Thread-1329 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:23:14,367 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/b4801353e1f24f75b81c42c7304e7a63 is 50, key is test_row_0/A:col10/1732303393243/Put/seqid=0 2024-11-22T19:23:14,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:41091 is added to blk_1073742253_1429 (size=12301) 2024-11-22T19:23:14,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-22T19:23:14,735 INFO [Thread-1335 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-11-22T19:23:14,773 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=655 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/b4801353e1f24f75b81c42c7304e7a63 2024-11-22T19:23:14,780 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/a106a298e3d14c35b083aac66c706636 is 50, key is test_row_0/B:col10/1732303393243/Put/seqid=0 2024-11-22T19:23:14,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742254_1430 (size=12301) 2024-11-22T19:23:14,798 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=655 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/a106a298e3d14c35b083aac66c706636 2024-11-22T19:23:14,805 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/b95e1a07370142828c0cbf711a6570b9 is 50, key is test_row_0/C:col10/1732303393243/Put/seqid=0 2024-11-22T19:23:14,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742255_1431 (size=12301) 2024-11-22T19:23:14,809 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=655 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/b95e1a07370142828c0cbf711a6570b9 2024-11-22T19:23:14,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/b4801353e1f24f75b81c42c7304e7a63 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/b4801353e1f24f75b81c42c7304e7a63 2024-11-22T19:23:14,816 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/b4801353e1f24f75b81c42c7304e7a63, entries=150, sequenceid=655, filesize=12.0 K 2024-11-22T19:23:14,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/a106a298e3d14c35b083aac66c706636 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a106a298e3d14c35b083aac66c706636 2024-11-22T19:23:14,821 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a106a298e3d14c35b083aac66c706636, entries=150, sequenceid=655, filesize=12.0 K 2024-11-22T19:23:14,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/b95e1a07370142828c0cbf711a6570b9 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/b95e1a07370142828c0cbf711a6570b9 2024-11-22T19:23:14,825 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/b95e1a07370142828c0cbf711a6570b9, entries=150, sequenceid=655, filesize=12.0 K 2024-11-22T19:23:14,826 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=13.42 KB/13740 for 583533db2ec7fa9b81dbb4dd334629b0 in 469ms, sequenceid=655, compaction requested=true 2024-11-22T19:23:14,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:14,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:23:14,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:14,826 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:23:14,826 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:23:14,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:23:14,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:14,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 583533db2ec7fa9b81dbb4dd334629b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:23:14,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:14,827 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48190 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:23:14,827 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48190 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:23:14,827 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/B is initiating minor compaction (all files) 2024-11-22T19:23:14,828 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/A is initiating minor compaction (all files) 2024-11-22T19:23:14,828 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/B in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:14,828 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/A in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:14,828 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/964a1b0aa7964735a9fe892b684bb3f1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/6d1f413107354aa3902354a297496c29, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/fa46959e9cd74c9ca7dd241d196db708, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a106a298e3d14c35b083aac66c706636] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=47.1 K 2024-11-22T19:23:14,828 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/740b60bde58c4c828282ca88c96845ac, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/18b5001883e747999aeee9bd3b8209d7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/df79c96ae84342c98d87c289f5f6e0df, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/b4801353e1f24f75b81c42c7304e7a63] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=47.1 K 2024-11-22T19:23:14,828 DEBUG 
[RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 964a1b0aa7964735a9fe892b684bb3f1, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=599, earliestPutTs=1732303392184 2024-11-22T19:23:14,828 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 740b60bde58c4c828282ca88c96845ac, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=599, earliestPutTs=1732303392184 2024-11-22T19:23:14,828 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d1f413107354aa3902354a297496c29, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=616, earliestPutTs=1732303392218 2024-11-22T19:23:14,828 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 18b5001883e747999aeee9bd3b8209d7, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=616, earliestPutTs=1732303392218 2024-11-22T19:23:14,829 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting df79c96ae84342c98d87c289f5f6e0df, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=639, earliestPutTs=1732303392570 2024-11-22T19:23:14,829 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting fa46959e9cd74c9ca7dd241d196db708, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=639, earliestPutTs=1732303392570 2024-11-22T19:23:14,829 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting a106a298e3d14c35b083aac66c706636, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=655, earliestPutTs=1732303393238 2024-11-22T19:23:14,829 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4801353e1f24f75b81c42c7304e7a63, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=655, earliestPutTs=1732303393238 2024-11-22T19:23:14,840 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#A#compaction#377 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:14,840 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#B#compaction#378 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:14,841 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/4b649b3c20744ddfb47348d22546f16d is 50, key is test_row_0/B:col10/1732303393243/Put/seqid=0 2024-11-22T19:23:14,841 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/4e82ca3f15b3429f9c457ca992201f8e is 50, key is test_row_0/A:col10/1732303393243/Put/seqid=0 2024-11-22T19:23:14,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742256_1432 (size=13867) 2024-11-22T19:23:14,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742257_1433 (size=13867) 2024-11-22T19:23:15,257 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/4e82ca3f15b3429f9c457ca992201f8e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/4e82ca3f15b3429f9c457ca992201f8e 2024-11-22T19:23:15,258 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/4b649b3c20744ddfb47348d22546f16d as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/4b649b3c20744ddfb47348d22546f16d 2024-11-22T19:23:15,263 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/A of 583533db2ec7fa9b81dbb4dd334629b0 into 4e82ca3f15b3429f9c457ca992201f8e(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:23:15,263 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:15,263 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/A, priority=12, startTime=1732303394826; duration=0sec 2024-11-22T19:23:15,263 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/B of 583533db2ec7fa9b81dbb4dd334629b0 into 4b649b3c20744ddfb47348d22546f16d(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:23:15,263 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:15,263 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:A 2024-11-22T19:23:15,263 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:15,263 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/B, priority=12, startTime=1732303394826; duration=0sec 2024-11-22T19:23:15,263 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:23:15,263 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:15,263 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:B 2024-11-22T19:23:15,265 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48156 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:23:15,265 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 583533db2ec7fa9b81dbb4dd334629b0/C is initiating minor compaction (all files) 2024-11-22T19:23:15,265 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 583533db2ec7fa9b81dbb4dd334629b0/C in TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
2024-11-22T19:23:15,265 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/fecc9866a5cc49619dd11757325ee7c1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/445fb1e4fe1647b9ac555ed85a938b0f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/3db0feac4dfd4d0c99eec41f278669df, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/b95e1a07370142828c0cbf711a6570b9] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp, totalSize=47.0 K 2024-11-22T19:23:15,265 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting fecc9866a5cc49619dd11757325ee7c1, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=599, earliestPutTs=1732303392184 2024-11-22T19:23:15,266 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 445fb1e4fe1647b9ac555ed85a938b0f, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=616, earliestPutTs=1732303392218 2024-11-22T19:23:15,266 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3db0feac4dfd4d0c99eec41f278669df, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=639, earliestPutTs=1732303392570 2024-11-22T19:23:15,266 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b95e1a07370142828c0cbf711a6570b9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=655, earliestPutTs=1732303393238 2024-11-22T19:23:15,277 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 583533db2ec7fa9b81dbb4dd334629b0#C#compaction#379 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:15,277 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/89572258542748859a1bbadddf887bf7 is 50, key is test_row_0/C:col10/1732303393243/Put/seqid=0 2024-11-22T19:23:15,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742258_1434 (size=13833) 2024-11-22T19:23:15,303 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/89572258542748859a1bbadddf887bf7 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/89572258542748859a1bbadddf887bf7 2024-11-22T19:23:15,308 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 583533db2ec7fa9b81dbb4dd334629b0/C of 583533db2ec7fa9b81dbb4dd334629b0 into 89572258542748859a1bbadddf887bf7(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:23:15,308 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:15,308 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0., storeName=583533db2ec7fa9b81dbb4dd334629b0/C, priority=12, startTime=1732303394827; duration=0sec 2024-11-22T19:23:15,308 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:15,308 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 583533db2ec7fa9b81dbb4dd334629b0:C 2024-11-22T19:23:21,685 DEBUG [Thread-1333 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17cf7fc0 to 127.0.0.1:57120 2024-11-22T19:23:21,685 DEBUG [Thread-1333 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:23:21,685 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-11-22T19:23:21,685 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 166
2024-11-22T19:23:21,685 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 43
2024-11-22T19:23:21,685 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 172
2024-11-22T19:23:21,685 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 138
2024-11-22T19:23:21,686 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 1
2024-11-22T19:23:21,686 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-22T19:23:21,686 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4773
2024-11-22T19:23:21,686 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4627
2024-11-22T19:23:21,686 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4617
2024-11-22T19:23:21,686 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4760
2024-11-22T19:23:21,686 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4644
2024-11-22T19:23:21,686 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-22T19:23:21,686 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-22T19:23:21,686 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17b6adc5 to 127.0.0.1:57120
2024-11-22T19:23:21,686 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-22T19:23:21,686 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-22T19:23:21,687 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-22T19:23:21,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=95, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-22T19:23:21,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95
2024-11-22T19:23:21,689 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303401689"}]},"ts":"1732303401689"}
2024-11-22T19:23:21,690 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-22T19:23:21,691 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-11-22T19:23:21,692 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-11-22T19:23:21,693 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=583533db2ec7fa9b81dbb4dd334629b0, UNASSIGN}]
2024-11-22T19:23:21,693 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=583533db2ec7fa9b81dbb4dd334629b0, UNASSIGN
2024-11-22T19:23:21,694 INFO [PEWorker-1 {}]
assignment.RegionStateStore(202): pid=97 updating hbase:meta row=583533db2ec7fa9b81dbb4dd334629b0, regionState=CLOSING, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:23:21,695 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T19:23:21,695 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE; CloseRegionProcedure 583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657}] 2024-11-22T19:23:21,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-22T19:23:21,846 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:21,846 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(124): Close 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:21,846 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-22T19:23:21,846 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1681): Closing 583533db2ec7fa9b81dbb4dd334629b0, disabling compactions & flushes 2024-11-22T19:23:21,846 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:21,846 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:21,846 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. after waiting 0 ms 2024-11-22T19:23:21,846 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 
2024-11-22T19:23:21,846 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(2837): Flushing 583533db2ec7fa9b81dbb4dd334629b0 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-22T19:23:21,847 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=A 2024-11-22T19:23:21,847 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:21,847 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=B 2024-11-22T19:23:21,847 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:21,847 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 583533db2ec7fa9b81dbb4dd334629b0, store=C 2024-11-22T19:23:21,847 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:21,850 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/72ebd9302dd7468cb831e84e12f00d5e is 50, key is test_row_0/A:col10/1732303401684/Put/seqid=0 2024-11-22T19:23:21,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742259_1435 (size=12301) 2024-11-22T19:23:21,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-22T19:23:22,254 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=664 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/72ebd9302dd7468cb831e84e12f00d5e 2024-11-22T19:23:22,260 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/a3908cecc0984a4a952e163ee19ee3d6 is 50, key is test_row_0/B:col10/1732303401684/Put/seqid=0 2024-11-22T19:23:22,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742260_1436 (size=12301) 2024-11-22T19:23:22,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-22T19:23:22,401 DEBUG [FsDatasetAsyncDiskServiceFixer {}] 
hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T19:23:22,663 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=664 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/a3908cecc0984a4a952e163ee19ee3d6 2024-11-22T19:23:22,670 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/a861ee24be314fe183fb2538620b8863 is 50, key is test_row_0/C:col10/1732303401684/Put/seqid=0 2024-11-22T19:23:22,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742261_1437 (size=12301) 2024-11-22T19:23:22,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-22T19:23:23,074 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=664 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/a861ee24be314fe183fb2538620b8863 2024-11-22T19:23:23,077 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/A/72ebd9302dd7468cb831e84e12f00d5e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/72ebd9302dd7468cb831e84e12f00d5e 2024-11-22T19:23:23,080 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/72ebd9302dd7468cb831e84e12f00d5e, entries=150, sequenceid=664, filesize=12.0 K 2024-11-22T19:23:23,080 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/B/a3908cecc0984a4a952e163ee19ee3d6 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a3908cecc0984a4a952e163ee19ee3d6 2024-11-22T19:23:23,083 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a3908cecc0984a4a952e163ee19ee3d6, entries=150, sequenceid=664, filesize=12.0 K 2024-11-22T19:23:23,083 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/.tmp/C/a861ee24be314fe183fb2538620b8863 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/a861ee24be314fe183fb2538620b8863 2024-11-22T19:23:23,086 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/a861ee24be314fe183fb2538620b8863, entries=150, sequenceid=664, filesize=12.0 K 2024-11-22T19:23:23,087 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 583533db2ec7fa9b81dbb4dd334629b0 in 1241ms, sequenceid=664, compaction requested=false 2024-11-22T19:23:23,087 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/e998fc66c6704f2aa6ba1f3281eb20ff, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/d6f4973880e44a02a1883d142ca29ad2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/8284d7cd94a24778ad876f7ff1cccfd6, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/4b9430fa6ba14e4091144bdbff542e87, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/d6a937864e2c47acbae6dfb1f4c22134, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/9996d997f48947daafa8cb0a6c20ee78, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/3445bdf1db0f42e88bbe13877459e4b0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/89d5151c0bc44e5190615458b9dc8e49, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/2e4c79dbdb254747a36e7c8d2a7529ae, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/41ab8b729f38403ab08c3fbfa6665c45, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/aaa1bbd6bb404d3fb7824d5cc2ed7c77, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f9f71a3ea4e5433c9b31c5792bc94136, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/c323b2c63fad42fab71c664d62d65d82, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/c986815811ec4fae9e33f99971544f2e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f3bdf3d2cc8042c7b2ac8a5a2f7b4487, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/4c8c57a2af7f4b76829be77ce87233b1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/7ae264b4011e4ba0af51df78cfb57b2a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f0f5bc87d69a4eb696bea74fec596085, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/98ca44d60a944bc7be1b87b5c195aa2b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/639ddd5acbc84a12883cc0f2c58454f8, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/a2b417f8f2fa452b899a225a5c54449d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/aa5b2408b4d44f3ea6e053ea590ca109, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/e2c645302e5f42bb92db86f7175992cc, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/e157f187fc8842d4bf4c693c55c4a839, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/04b0ee589b6e4a77860967b8bd44bbfd, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/93453b641b0d4db295e62a0ca57c8d69, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/d1e2f6a326c74f5b99874dcf189920a3, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f32b34f86dc94f7ab6f769b58de76a50, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/49ca99f074db49f8b09e5a453bc1f5c8, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/fa83f8c2395f4724a2ab9780fd100364, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f5bf4dfa9ef34001a57159813e6e2bf1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/acca12499bf0466f9206ac1fead624b0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/b487763388514094ab2e9473e0869458, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/7f559d58a7954123bd2d17621426fd72, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/aa2f1d12b5d24f1b9bc11e16ff2814d0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/7423ce8cef984c4284e8f08c73d8c3a3, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/3a49b2ade6cf4bd9b787376cec4682d7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/00c548f3b0b14ac3af081743fe0e4983, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/9d9ed47c42b345cabd126346750bf2fa, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/a55dfafa9b844a1fbb122cb924f9e8c1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/dca9e72a5b244287ab12bda2f663128a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/740b60bde58c4c828282ca88c96845ac, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/3320e785854446ccb35cbb8a5d1e2805, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/18b5001883e747999aeee9bd3b8209d7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/df79c96ae84342c98d87c289f5f6e0df, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/b4801353e1f24f75b81c42c7304e7a63] to archive 2024-11-22T19:23:23,088 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-22T19:23:23,090 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/e998fc66c6704f2aa6ba1f3281eb20ff to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/e998fc66c6704f2aa6ba1f3281eb20ff 2024-11-22T19:23:23,091 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/d6f4973880e44a02a1883d142ca29ad2 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/d6f4973880e44a02a1883d142ca29ad2 2024-11-22T19:23:23,091 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/8284d7cd94a24778ad876f7ff1cccfd6 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/8284d7cd94a24778ad876f7ff1cccfd6 2024-11-22T19:23:23,092 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/4b9430fa6ba14e4091144bdbff542e87 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/4b9430fa6ba14e4091144bdbff542e87 2024-11-22T19:23:23,093 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/d6a937864e2c47acbae6dfb1f4c22134 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/d6a937864e2c47acbae6dfb1f4c22134 2024-11-22T19:23:23,094 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/9996d997f48947daafa8cb0a6c20ee78 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/9996d997f48947daafa8cb0a6c20ee78 2024-11-22T19:23:23,095 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/3445bdf1db0f42e88bbe13877459e4b0 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/3445bdf1db0f42e88bbe13877459e4b0 2024-11-22T19:23:23,096 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/89d5151c0bc44e5190615458b9dc8e49 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/89d5151c0bc44e5190615458b9dc8e49 2024-11-22T19:23:23,096 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/2e4c79dbdb254747a36e7c8d2a7529ae to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/2e4c79dbdb254747a36e7c8d2a7529ae 2024-11-22T19:23:23,097 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/41ab8b729f38403ab08c3fbfa6665c45 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/41ab8b729f38403ab08c3fbfa6665c45 2024-11-22T19:23:23,098 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/aaa1bbd6bb404d3fb7824d5cc2ed7c77 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/aaa1bbd6bb404d3fb7824d5cc2ed7c77 2024-11-22T19:23:23,099 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f9f71a3ea4e5433c9b31c5792bc94136 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f9f71a3ea4e5433c9b31c5792bc94136 2024-11-22T19:23:23,100 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/c323b2c63fad42fab71c664d62d65d82 to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/c323b2c63fad42fab71c664d62d65d82 2024-11-22T19:23:23,101 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/c986815811ec4fae9e33f99971544f2e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/c986815811ec4fae9e33f99971544f2e 2024-11-22T19:23:23,102 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f3bdf3d2cc8042c7b2ac8a5a2f7b4487 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f3bdf3d2cc8042c7b2ac8a5a2f7b4487 2024-11-22T19:23:23,102 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/4c8c57a2af7f4b76829be77ce87233b1 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/4c8c57a2af7f4b76829be77ce87233b1 2024-11-22T19:23:23,103 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/7ae264b4011e4ba0af51df78cfb57b2a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/7ae264b4011e4ba0af51df78cfb57b2a 2024-11-22T19:23:23,104 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f0f5bc87d69a4eb696bea74fec596085 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f0f5bc87d69a4eb696bea74fec596085 2024-11-22T19:23:23,105 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/98ca44d60a944bc7be1b87b5c195aa2b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/98ca44d60a944bc7be1b87b5c195aa2b 2024-11-22T19:23:23,106 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/639ddd5acbc84a12883cc0f2c58454f8 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/639ddd5acbc84a12883cc0f2c58454f8 2024-11-22T19:23:23,107 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/a2b417f8f2fa452b899a225a5c54449d to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/a2b417f8f2fa452b899a225a5c54449d 2024-11-22T19:23:23,108 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/aa5b2408b4d44f3ea6e053ea590ca109 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/aa5b2408b4d44f3ea6e053ea590ca109 2024-11-22T19:23:23,109 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/e2c645302e5f42bb92db86f7175992cc to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/e2c645302e5f42bb92db86f7175992cc 2024-11-22T19:23:23,109 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/e157f187fc8842d4bf4c693c55c4a839 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/e157f187fc8842d4bf4c693c55c4a839 2024-11-22T19:23:23,110 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/04b0ee589b6e4a77860967b8bd44bbfd to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/04b0ee589b6e4a77860967b8bd44bbfd 2024-11-22T19:23:23,111 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/93453b641b0d4db295e62a0ca57c8d69 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/93453b641b0d4db295e62a0ca57c8d69 2024-11-22T19:23:23,111 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/d1e2f6a326c74f5b99874dcf189920a3 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/d1e2f6a326c74f5b99874dcf189920a3 2024-11-22T19:23:23,112 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f32b34f86dc94f7ab6f769b58de76a50 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f32b34f86dc94f7ab6f769b58de76a50 2024-11-22T19:23:23,113 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/49ca99f074db49f8b09e5a453bc1f5c8 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/49ca99f074db49f8b09e5a453bc1f5c8 2024-11-22T19:23:23,114 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/fa83f8c2395f4724a2ab9780fd100364 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/fa83f8c2395f4724a2ab9780fd100364 2024-11-22T19:23:23,114 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f5bf4dfa9ef34001a57159813e6e2bf1 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/f5bf4dfa9ef34001a57159813e6e2bf1 2024-11-22T19:23:23,115 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/acca12499bf0466f9206ac1fead624b0 to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/acca12499bf0466f9206ac1fead624b0 2024-11-22T19:23:23,116 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/b487763388514094ab2e9473e0869458 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/b487763388514094ab2e9473e0869458 2024-11-22T19:23:23,117 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/7f559d58a7954123bd2d17621426fd72 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/7f559d58a7954123bd2d17621426fd72 2024-11-22T19:23:23,118 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/aa2f1d12b5d24f1b9bc11e16ff2814d0 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/aa2f1d12b5d24f1b9bc11e16ff2814d0 2024-11-22T19:23:23,118 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/7423ce8cef984c4284e8f08c73d8c3a3 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/7423ce8cef984c4284e8f08c73d8c3a3 2024-11-22T19:23:23,120 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/3a49b2ade6cf4bd9b787376cec4682d7 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/3a49b2ade6cf4bd9b787376cec4682d7 2024-11-22T19:23:23,121 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/00c548f3b0b14ac3af081743fe0e4983 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/00c548f3b0b14ac3af081743fe0e4983 2024-11-22T19:23:23,122 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/9d9ed47c42b345cabd126346750bf2fa to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/9d9ed47c42b345cabd126346750bf2fa 2024-11-22T19:23:23,123 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/a55dfafa9b844a1fbb122cb924f9e8c1 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/a55dfafa9b844a1fbb122cb924f9e8c1 2024-11-22T19:23:23,124 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/dca9e72a5b244287ab12bda2f663128a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/dca9e72a5b244287ab12bda2f663128a 2024-11-22T19:23:23,124 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/740b60bde58c4c828282ca88c96845ac to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/740b60bde58c4c828282ca88c96845ac 2024-11-22T19:23:23,125 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/3320e785854446ccb35cbb8a5d1e2805 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/3320e785854446ccb35cbb8a5d1e2805 2024-11-22T19:23:23,126 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/18b5001883e747999aeee9bd3b8209d7 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/18b5001883e747999aeee9bd3b8209d7 2024-11-22T19:23:23,127 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/df79c96ae84342c98d87c289f5f6e0df to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/df79c96ae84342c98d87c289f5f6e0df 2024-11-22T19:23:23,128 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/b4801353e1f24f75b81c42c7304e7a63 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/b4801353e1f24f75b81c42c7304e7a63 2024-11-22T19:23:23,129 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/cc17d7d28cbf43b0bb2ac8579ac76004, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/31484cfc2c08467c842ddc2024d38808, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/563fcfc545564256a9a6cf57aff71f63, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/38e95111fedb448298bf4114b1092be2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/91d1d9a9867642e4b6fa1f661cdf9bb5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a062cf04bff14a54beffa6fa7f92fb45, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/0ec8cdbfb9d44a89ab94692056cf4123, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/22a7bc37de3540c09b8063c08ff16759, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e040aa3e9733428686bc30550c98af92, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/2e14ffc5e4204191bc45542fdee093cb, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/2c51ee0321bd4e648ed9d6c77257d01f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/f7ff9de4ae9a4ea4848cf2192d633bfe, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a3372c9e099b4cdd9c2038e32285e786, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/089284de71dc4c1a89c938bf1f755e54, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a9644dd2e4a94cc6932b6afa8529db77, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/c52ccc994aa84c55a6bab16a64eeba7f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/30c19665cbf644f985c0271bf6a15db2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/dba3c26b754e40f6b6e095903dd90492, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e4753735f3c44ebeb6982672f524c6ee, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/89adbbd3b8fe44ce982810007bb1bc70, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/537579f02ee245ac8243e5b5898def67, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/2cdaad9a6f244a76b3e5a0cbd293e181, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/cc346babf17d4699b4462bdfe9dafc6c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/667c841fa77c45e89adfa45694231198, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/998fdc28d3e945228bad75b6e8bf60b3, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/247783e2af4e4a5ca25cd8ae5b93d668, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e69abe4adbec43bda6ccec75f52ac16e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/068c842b266d457ca6396ad71ff059ef, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/34c79e3e086f4da4be67d4b35e01ab80, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e1b0624f68fd43bc945a060485673e44, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/bce6fa366b814b1e8e6a7369894fe66a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/5a01e96cc8434534933e0e6f146277d1, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/6401aafa6d5d48b8973eaad8fedb85c2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/935032b3284d4a41ba47c011238c3887, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/28c4f5aa58864f31b726117eb32210e0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/60b40a3c9a61483db2139562d3b21b2e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/76cfbad4c7024e9a892e553ee01b3d5a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/489f27bed39f467197005101ef357e9f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e507dab0189741ba97cd010489b5c33c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/2a4aada8ad4848768b264ad774189bce, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/88ecbace09dc4755be19ded4902cdc3b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/964a1b0aa7964735a9fe892b684bb3f1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/6c992448244341018c6df6a1a342d1cd, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/6d1f413107354aa3902354a297496c29, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/fa46959e9cd74c9ca7dd241d196db708, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a106a298e3d14c35b083aac66c706636] to archive 2024-11-22T19:23:23,130 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
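The entries above trace the store-close archival pattern for column family B: regionserver.HStore(2316) lists the compacted store files to move, backup.HFileArchiver(360) announces the archival pass, and each backup.HFileArchiver(596) line records one file relocated from the region's data directory to the matching location under archive/. The short Java sketch below only illustrates that path mapping as it appears in these log lines, on the assumption that the archive tree mirrors the data tree one-for-one; ArchivePathSketch and toArchivePath are hypothetical names for illustration and this is not the HBase HFileArchiver implementation.

// Illustrative sketch only, not HBase code: derive the archive location of a store file
// from the pattern visible in the HFileArchiver(596) log entries above.
public class ArchivePathSketch {

    // rootDir is assumed to be the cluster root shown in the log
    // (hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-...).
    static String toArchivePath(String rootDir, String storeFilePath) {
        String dataPrefix = rootDir + "/data/";
        if (!storeFilePath.startsWith(dataPrefix)) {
            throw new IllegalArgumentException("not under " + dataPrefix + ": " + storeFilePath);
        }
        // Keep the relative part (namespace/table/region/family/file)
        // and re-root it under <root>/archive/data/.
        String relative = storeFilePath.substring(dataPrefix.length());
        return rootDir + "/archive/data/" + relative;
    }

    public static void main(String[] args) {
        String root = "hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982";
        String src = root + "/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/cc17d7d28cbf43b0bb2ac8579ac76004";
        // Prints the same archive path that the corresponding HFileArchiver(596) entry reports.
        System.out.println(toArchivePath(root, src));
    }
}

Because the layout is preserved under archive/ rather than the files being deleted, the compacted store files remain available for snapshot and backup references until the cleaner decides they are no longer needed; the subsequent per-family blocks for C below follow the same three-step pattern.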
2024-11-22T19:23:23,131 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/cc17d7d28cbf43b0bb2ac8579ac76004 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/cc17d7d28cbf43b0bb2ac8579ac76004 2024-11-22T19:23:23,132 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/31484cfc2c08467c842ddc2024d38808 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/31484cfc2c08467c842ddc2024d38808 2024-11-22T19:23:23,133 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/563fcfc545564256a9a6cf57aff71f63 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/563fcfc545564256a9a6cf57aff71f63 2024-11-22T19:23:23,134 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/38e95111fedb448298bf4114b1092be2 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/38e95111fedb448298bf4114b1092be2 2024-11-22T19:23:23,135 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/91d1d9a9867642e4b6fa1f661cdf9bb5 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/91d1d9a9867642e4b6fa1f661cdf9bb5 2024-11-22T19:23:23,136 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a062cf04bff14a54beffa6fa7f92fb45 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a062cf04bff14a54beffa6fa7f92fb45 2024-11-22T19:23:23,136 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/0ec8cdbfb9d44a89ab94692056cf4123 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/0ec8cdbfb9d44a89ab94692056cf4123 2024-11-22T19:23:23,137 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/22a7bc37de3540c09b8063c08ff16759 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/22a7bc37de3540c09b8063c08ff16759 2024-11-22T19:23:23,138 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e040aa3e9733428686bc30550c98af92 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e040aa3e9733428686bc30550c98af92 2024-11-22T19:23:23,139 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/2e14ffc5e4204191bc45542fdee093cb to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/2e14ffc5e4204191bc45542fdee093cb 2024-11-22T19:23:23,140 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/2c51ee0321bd4e648ed9d6c77257d01f to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/2c51ee0321bd4e648ed9d6c77257d01f 2024-11-22T19:23:23,140 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/f7ff9de4ae9a4ea4848cf2192d633bfe to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/f7ff9de4ae9a4ea4848cf2192d633bfe 2024-11-22T19:23:23,141 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a3372c9e099b4cdd9c2038e32285e786 to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a3372c9e099b4cdd9c2038e32285e786 2024-11-22T19:23:23,142 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/089284de71dc4c1a89c938bf1f755e54 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/089284de71dc4c1a89c938bf1f755e54 2024-11-22T19:23:23,143 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a9644dd2e4a94cc6932b6afa8529db77 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a9644dd2e4a94cc6932b6afa8529db77 2024-11-22T19:23:23,144 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/c52ccc994aa84c55a6bab16a64eeba7f to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/c52ccc994aa84c55a6bab16a64eeba7f 2024-11-22T19:23:23,144 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/30c19665cbf644f985c0271bf6a15db2 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/30c19665cbf644f985c0271bf6a15db2 2024-11-22T19:23:23,145 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/dba3c26b754e40f6b6e095903dd90492 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/dba3c26b754e40f6b6e095903dd90492 2024-11-22T19:23:23,146 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e4753735f3c44ebeb6982672f524c6ee to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e4753735f3c44ebeb6982672f524c6ee 2024-11-22T19:23:23,147 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/89adbbd3b8fe44ce982810007bb1bc70 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/89adbbd3b8fe44ce982810007bb1bc70 2024-11-22T19:23:23,148 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/537579f02ee245ac8243e5b5898def67 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/537579f02ee245ac8243e5b5898def67 2024-11-22T19:23:23,149 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/2cdaad9a6f244a76b3e5a0cbd293e181 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/2cdaad9a6f244a76b3e5a0cbd293e181 2024-11-22T19:23:23,150 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/cc346babf17d4699b4462bdfe9dafc6c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/cc346babf17d4699b4462bdfe9dafc6c 2024-11-22T19:23:23,150 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/667c841fa77c45e89adfa45694231198 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/667c841fa77c45e89adfa45694231198 2024-11-22T19:23:23,151 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/998fdc28d3e945228bad75b6e8bf60b3 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/998fdc28d3e945228bad75b6e8bf60b3 2024-11-22T19:23:23,152 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/247783e2af4e4a5ca25cd8ae5b93d668 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/247783e2af4e4a5ca25cd8ae5b93d668 2024-11-22T19:23:23,153 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e69abe4adbec43bda6ccec75f52ac16e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e69abe4adbec43bda6ccec75f52ac16e 2024-11-22T19:23:23,153 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/068c842b266d457ca6396ad71ff059ef to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/068c842b266d457ca6396ad71ff059ef 2024-11-22T19:23:23,154 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/34c79e3e086f4da4be67d4b35e01ab80 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/34c79e3e086f4da4be67d4b35e01ab80 2024-11-22T19:23:23,155 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e1b0624f68fd43bc945a060485673e44 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e1b0624f68fd43bc945a060485673e44 2024-11-22T19:23:23,156 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/bce6fa366b814b1e8e6a7369894fe66a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/bce6fa366b814b1e8e6a7369894fe66a 2024-11-22T19:23:23,157 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/5a01e96cc8434534933e0e6f146277d1 to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/5a01e96cc8434534933e0e6f146277d1 2024-11-22T19:23:23,158 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/6401aafa6d5d48b8973eaad8fedb85c2 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/6401aafa6d5d48b8973eaad8fedb85c2 2024-11-22T19:23:23,159 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/935032b3284d4a41ba47c011238c3887 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/935032b3284d4a41ba47c011238c3887 2024-11-22T19:23:23,159 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/28c4f5aa58864f31b726117eb32210e0 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/28c4f5aa58864f31b726117eb32210e0 2024-11-22T19:23:23,160 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/60b40a3c9a61483db2139562d3b21b2e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/60b40a3c9a61483db2139562d3b21b2e 2024-11-22T19:23:23,161 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/76cfbad4c7024e9a892e553ee01b3d5a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/76cfbad4c7024e9a892e553ee01b3d5a 2024-11-22T19:23:23,162 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/489f27bed39f467197005101ef357e9f to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/489f27bed39f467197005101ef357e9f 2024-11-22T19:23:23,163 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e507dab0189741ba97cd010489b5c33c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/e507dab0189741ba97cd010489b5c33c 2024-11-22T19:23:23,164 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/2a4aada8ad4848768b264ad774189bce to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/2a4aada8ad4848768b264ad774189bce 2024-11-22T19:23:23,164 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/88ecbace09dc4755be19ded4902cdc3b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/88ecbace09dc4755be19ded4902cdc3b 2024-11-22T19:23:23,165 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/964a1b0aa7964735a9fe892b684bb3f1 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/964a1b0aa7964735a9fe892b684bb3f1 2024-11-22T19:23:23,166 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/6c992448244341018c6df6a1a342d1cd to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/6c992448244341018c6df6a1a342d1cd 2024-11-22T19:23:23,167 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/6d1f413107354aa3902354a297496c29 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/6d1f413107354aa3902354a297496c29 2024-11-22T19:23:23,168 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/fa46959e9cd74c9ca7dd241d196db708 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/fa46959e9cd74c9ca7dd241d196db708 2024-11-22T19:23:23,169 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a106a298e3d14c35b083aac66c706636 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a106a298e3d14c35b083aac66c706636 2024-11-22T19:23:23,170 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/07d11fc1ba604d218aa49aca48d5db91, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/eb21e6b4e9254f83a2e413b783a89d11, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/7ba98a3864914cab8cfee1d6e2402b76, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/a0050d7dd4dd4d8ba373c84b36d7ce96, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/2ff15428645c4558a2c7a7d3d80f9aaa, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/0a8a49422e604f69aa9f183bc911e4c0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/c4a9482162c0461eaaabe5ac5f8db61a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/25730cfbb3a04303ac46dda39915e44a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/6092c449a4384247ac7ea8c9f3e6bba3, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/5f6cd007914c405bbe9b438f1ba27623, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/5cfc75fa999f4c5092de287412eb1dcd, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/9527662c094e4471a06ab81edb1da9d4, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/dc921f7c342443b9a9cead3b37daa28d, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/e50601abd3924fc490bddbfb2bae2a75, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/dbfef666370b4b4482993796c3fc89f0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/183e5667786a467eaaca07168aefa301, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/05e9f80013eb43ff81ec441314806109, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/9e86f062cab449d699bdc47add859f3c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/616ed1b67f414d49a3fad89974a57474, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/c7059c3bf91b4ff490a27404b5b7b387, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/2d4d8dd6fac24efeae1a47b3bd0a4c0c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/0b368cdcc941487da55aa4cbef6aaa0e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/a2434eee018544328db0dc86ef407bc2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/c336223973fc4c50bfe6926c259d8e8b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/1d4d598cc47d4bcebfc3939d74c81a1b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/933c0cc94fc44c70ab8271d2b69dabcd, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/012460f641fc407c94d14033c086e359, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/d7cebb95964743ccb9356c161efb2029, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/2bf461f9d7af4773b5ad2a468b7811ba, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/7e0b0dd57f404d8b8f4577cebcbcb6e3, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/097bcd9cb10c41f4bf9372d3ee7be4e1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/c99e507f9ed2406cbadbffe8933a763c, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/aeda6ca8144141188ea097ec250a8514, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/8402aca896a642fe825f85832a1eff25, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/4e30f4346fbf40f7b5a4df4c108e3824, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/77ee3fbb257049adbc6727057778b29d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/ae525dbeb6b2482c8dccc605268893f1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/84b27e77728d49f489cbf6ebe63fe130, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/be7d63bec44b45d1bb15987fd09dbf1d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/fd6e3195719842b2966905ea23b791f5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/fecc9866a5cc49619dd11757325ee7c1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/3f6671608f3f4c14a6516289f62005e4, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/445fb1e4fe1647b9ac555ed85a938b0f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/3db0feac4dfd4d0c99eec41f278669df, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/b95e1a07370142828c0cbf711a6570b9] to archive 2024-11-22T19:23:23,171 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-22T19:23:23,172 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/07d11fc1ba604d218aa49aca48d5db91 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/07d11fc1ba604d218aa49aca48d5db91 2024-11-22T19:23:23,172 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/eb21e6b4e9254f83a2e413b783a89d11 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/eb21e6b4e9254f83a2e413b783a89d11 2024-11-22T19:23:23,173 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/7ba98a3864914cab8cfee1d6e2402b76 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/7ba98a3864914cab8cfee1d6e2402b76 2024-11-22T19:23:23,174 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/a0050d7dd4dd4d8ba373c84b36d7ce96 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/a0050d7dd4dd4d8ba373c84b36d7ce96 2024-11-22T19:23:23,175 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/2ff15428645c4558a2c7a7d3d80f9aaa to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/2ff15428645c4558a2c7a7d3d80f9aaa 2024-11-22T19:23:23,176 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/0a8a49422e604f69aa9f183bc911e4c0 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/0a8a49422e604f69aa9f183bc911e4c0 2024-11-22T19:23:23,176 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/c4a9482162c0461eaaabe5ac5f8db61a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/c4a9482162c0461eaaabe5ac5f8db61a 2024-11-22T19:23:23,177 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/25730cfbb3a04303ac46dda39915e44a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/25730cfbb3a04303ac46dda39915e44a 2024-11-22T19:23:23,178 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/6092c449a4384247ac7ea8c9f3e6bba3 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/6092c449a4384247ac7ea8c9f3e6bba3 2024-11-22T19:23:23,179 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/5f6cd007914c405bbe9b438f1ba27623 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/5f6cd007914c405bbe9b438f1ba27623 2024-11-22T19:23:23,180 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/5cfc75fa999f4c5092de287412eb1dcd to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/5cfc75fa999f4c5092de287412eb1dcd 2024-11-22T19:23:23,181 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/9527662c094e4471a06ab81edb1da9d4 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/9527662c094e4471a06ab81edb1da9d4 2024-11-22T19:23:23,182 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/dc921f7c342443b9a9cead3b37daa28d to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/dc921f7c342443b9a9cead3b37daa28d 2024-11-22T19:23:23,182 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/e50601abd3924fc490bddbfb2bae2a75 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/e50601abd3924fc490bddbfb2bae2a75 2024-11-22T19:23:23,183 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/dbfef666370b4b4482993796c3fc89f0 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/dbfef666370b4b4482993796c3fc89f0 2024-11-22T19:23:23,184 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/183e5667786a467eaaca07168aefa301 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/183e5667786a467eaaca07168aefa301 2024-11-22T19:23:23,185 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/05e9f80013eb43ff81ec441314806109 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/05e9f80013eb43ff81ec441314806109 2024-11-22T19:23:23,186 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/9e86f062cab449d699bdc47add859f3c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/9e86f062cab449d699bdc47add859f3c 2024-11-22T19:23:23,186 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/616ed1b67f414d49a3fad89974a57474 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/616ed1b67f414d49a3fad89974a57474 2024-11-22T19:23:23,187 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/c7059c3bf91b4ff490a27404b5b7b387 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/c7059c3bf91b4ff490a27404b5b7b387 2024-11-22T19:23:23,188 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/2d4d8dd6fac24efeae1a47b3bd0a4c0c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/2d4d8dd6fac24efeae1a47b3bd0a4c0c 2024-11-22T19:23:23,189 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/0b368cdcc941487da55aa4cbef6aaa0e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/0b368cdcc941487da55aa4cbef6aaa0e 2024-11-22T19:23:23,189 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/a2434eee018544328db0dc86ef407bc2 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/a2434eee018544328db0dc86ef407bc2 2024-11-22T19:23:23,190 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/c336223973fc4c50bfe6926c259d8e8b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/c336223973fc4c50bfe6926c259d8e8b 2024-11-22T19:23:23,191 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/1d4d598cc47d4bcebfc3939d74c81a1b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/1d4d598cc47d4bcebfc3939d74c81a1b 2024-11-22T19:23:23,192 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/933c0cc94fc44c70ab8271d2b69dabcd to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/933c0cc94fc44c70ab8271d2b69dabcd 2024-11-22T19:23:23,193 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/012460f641fc407c94d14033c086e359 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/012460f641fc407c94d14033c086e359 2024-11-22T19:23:23,194 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/d7cebb95964743ccb9356c161efb2029 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/d7cebb95964743ccb9356c161efb2029 2024-11-22T19:23:23,195 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/2bf461f9d7af4773b5ad2a468b7811ba to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/2bf461f9d7af4773b5ad2a468b7811ba 2024-11-22T19:23:23,195 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/7e0b0dd57f404d8b8f4577cebcbcb6e3 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/7e0b0dd57f404d8b8f4577cebcbcb6e3 2024-11-22T19:23:23,196 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/097bcd9cb10c41f4bf9372d3ee7be4e1 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/097bcd9cb10c41f4bf9372d3ee7be4e1 2024-11-22T19:23:23,197 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/c99e507f9ed2406cbadbffe8933a763c to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/c99e507f9ed2406cbadbffe8933a763c 2024-11-22T19:23:23,197 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/aeda6ca8144141188ea097ec250a8514 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/aeda6ca8144141188ea097ec250a8514 2024-11-22T19:23:23,198 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/8402aca896a642fe825f85832a1eff25 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/8402aca896a642fe825f85832a1eff25 2024-11-22T19:23:23,199 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/4e30f4346fbf40f7b5a4df4c108e3824 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/4e30f4346fbf40f7b5a4df4c108e3824 2024-11-22T19:23:23,200 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/77ee3fbb257049adbc6727057778b29d to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/77ee3fbb257049adbc6727057778b29d 2024-11-22T19:23:23,201 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/ae525dbeb6b2482c8dccc605268893f1 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/ae525dbeb6b2482c8dccc605268893f1 2024-11-22T19:23:23,201 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/84b27e77728d49f489cbf6ebe63fe130 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/84b27e77728d49f489cbf6ebe63fe130 2024-11-22T19:23:23,202 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/be7d63bec44b45d1bb15987fd09dbf1d to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/be7d63bec44b45d1bb15987fd09dbf1d 2024-11-22T19:23:23,203 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/fd6e3195719842b2966905ea23b791f5 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/fd6e3195719842b2966905ea23b791f5 2024-11-22T19:23:23,204 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/fecc9866a5cc49619dd11757325ee7c1 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/fecc9866a5cc49619dd11757325ee7c1 2024-11-22T19:23:23,204 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/3f6671608f3f4c14a6516289f62005e4 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/3f6671608f3f4c14a6516289f62005e4 2024-11-22T19:23:23,205 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/445fb1e4fe1647b9ac555ed85a938b0f to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/445fb1e4fe1647b9ac555ed85a938b0f 2024-11-22T19:23:23,206 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/3db0feac4dfd4d0c99eec41f278669df to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/3db0feac4dfd4d0c99eec41f278669df 2024-11-22T19:23:23,207 DEBUG [StoreCloser-TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/b95e1a07370142828c0cbf711a6570b9 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/b95e1a07370142828c0cbf711a6570b9 2024-11-22T19:23:23,210 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/recovered.edits/667.seqid, newMaxSeqId=667, maxSeqId=1 2024-11-22T19:23:23,211 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0. 2024-11-22T19:23:23,211 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1635): Region close journal for 583533db2ec7fa9b81dbb4dd334629b0: 2024-11-22T19:23:23,212 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(170): Closed 583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:23,212 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=583533db2ec7fa9b81dbb4dd334629b0, regionState=CLOSED 2024-11-22T19:23:23,214 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-11-22T19:23:23,214 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; CloseRegionProcedure 583533db2ec7fa9b81dbb4dd334629b0, server=a307a1377457,35917,1732303314657 in 1.5180 sec 2024-11-22T19:23:23,215 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96 2024-11-22T19:23:23,215 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=583533db2ec7fa9b81dbb4dd334629b0, UNASSIGN in 1.5210 sec 2024-11-22T19:23:23,217 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-11-22T19:23:23,217 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5240 sec 2024-11-22T19:23:23,217 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303403217"}]},"ts":"1732303403217"} 2024-11-22T19:23:23,218 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-22T19:23:23,220 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-22T19:23:23,221 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5330 sec 2024-11-22T19:23:23,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-22T19:23:23,792 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: 
default:TestAcidGuarantees, procId: 95 completed 2024-11-22T19:23:23,792 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-22T19:23:23,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:23:23,794 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=99, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:23:23,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-22T19:23:23,794 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=99, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:23:23,796 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:23,797 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A, FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B, FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C, FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/recovered.edits] 2024-11-22T19:23:23,800 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/4e82ca3f15b3429f9c457ca992201f8e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/4e82ca3f15b3429f9c457ca992201f8e 2024-11-22T19:23:23,801 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/72ebd9302dd7468cb831e84e12f00d5e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/A/72ebd9302dd7468cb831e84e12f00d5e 2024-11-22T19:23:23,803 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/4b649b3c20744ddfb47348d22546f16d to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/4b649b3c20744ddfb47348d22546f16d 2024-11-22T19:23:23,803 DEBUG 
[HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a3908cecc0984a4a952e163ee19ee3d6 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/B/a3908cecc0984a4a952e163ee19ee3d6 2024-11-22T19:23:23,805 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/89572258542748859a1bbadddf887bf7 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/89572258542748859a1bbadddf887bf7 2024-11-22T19:23:23,806 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/a861ee24be314fe183fb2538620b8863 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/C/a861ee24be314fe183fb2538620b8863 2024-11-22T19:23:23,808 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/recovered.edits/667.seqid to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0/recovered.edits/667.seqid 2024-11-22T19:23:23,808 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/583533db2ec7fa9b81dbb4dd334629b0 2024-11-22T19:23:23,808 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-22T19:23:23,810 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=99, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:23:23,813 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-22T19:23:23,815 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-22T19:23:23,816 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=99, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:23:23,816 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-22T19:23:23,816 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732303403816"}]},"ts":"9223372036854775807"} 2024-11-22T19:23:23,817 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-22T19:23:23,817 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 583533db2ec7fa9b81dbb4dd334629b0, NAME => 'TestAcidGuarantees,,1732303372171.583533db2ec7fa9b81dbb4dd334629b0.', STARTKEY => '', ENDKEY => ''}] 2024-11-22T19:23:23,817 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-22T19:23:23,818 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732303403817"}]},"ts":"9223372036854775807"} 2024-11-22T19:23:23,819 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-22T19:23:23,820 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=99, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:23:23,821 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 27 msec 2024-11-22T19:23:23,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-22T19:23:23,895 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 99 completed 2024-11-22T19:23:23,904 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=237 (was 244), OpenFileDescriptor=453 (was 465), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=723 (was 737), ProcessCount=11 (was 11), AvailableMemoryMB=4108 (was 4169) 2024-11-22T19:23:23,913 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=237, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=723, ProcessCount=11, AvailableMemoryMB=4107 2024-11-22T19:23:23,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-22T19:23:23,914 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T19:23:23,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=100, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-22T19:23:23,915 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T19:23:23,916 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:23,916 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 100 2024-11-22T19:23:23,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-22T19:23:23,916 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T19:23:23,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742262_1438 (size=963) 2024-11-22T19:23:24,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-22T19:23:24,145 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-22T19:23:24,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-22T19:23:24,323 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''}, 
tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982 2024-11-22T19:23:24,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742263_1439 (size=53) 2024-11-22T19:23:24,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-22T19:23:24,728 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T19:23:24,728 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 842a4f99d1015855e3e2b86470e8d61b, disabling compactions & flushes 2024-11-22T19:23:24,728 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:24,728 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:24,728 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. after waiting 0 ms 2024-11-22T19:23:24,728 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:24,728 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
2024-11-22T19:23:24,728 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:24,729 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T19:23:24,729 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732303404729"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732303404729"}]},"ts":"1732303404729"} 2024-11-22T19:23:24,730 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-22T19:23:24,731 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T19:23:24,731 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303404731"}]},"ts":"1732303404731"} 2024-11-22T19:23:24,732 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-22T19:23:24,735 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=842a4f99d1015855e3e2b86470e8d61b, ASSIGN}] 2024-11-22T19:23:24,736 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=842a4f99d1015855e3e2b86470e8d61b, ASSIGN 2024-11-22T19:23:24,736 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=842a4f99d1015855e3e2b86470e8d61b, ASSIGN; state=OFFLINE, location=a307a1377457,35917,1732303314657; forceNewPlan=false, retain=false 2024-11-22T19:23:24,887 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=842a4f99d1015855e3e2b86470e8d61b, regionState=OPENING, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:23:24,888 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE; OpenRegionProcedure 842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657}] 2024-11-22T19:23:25,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-22T19:23:25,039 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:25,042 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
2024-11-22T19:23:25,042 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7285): Opening region: {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} 2024-11-22T19:23:25,042 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:25,042 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T19:23:25,043 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7327): checking encryption for 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:25,043 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7330): checking classloading for 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:25,044 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:25,045 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:23:25,045 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 842a4f99d1015855e3e2b86470e8d61b columnFamilyName A 2024-11-22T19:23:25,045 DEBUG [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:25,045 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] regionserver.HStore(327): Store=842a4f99d1015855e3e2b86470e8d61b/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:23:25,046 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:25,046 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:23:25,047 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 842a4f99d1015855e3e2b86470e8d61b columnFamilyName B 2024-11-22T19:23:25,047 DEBUG [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:25,047 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] regionserver.HStore(327): Store=842a4f99d1015855e3e2b86470e8d61b/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:23:25,047 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:25,048 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:23:25,048 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 842a4f99d1015855e3e2b86470e8d61b columnFamilyName C 2024-11-22T19:23:25,048 DEBUG [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:25,048 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] regionserver.HStore(327): Store=842a4f99d1015855e3e2b86470e8d61b/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:23:25,049 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:25,049 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:25,049 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:25,050 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T19:23:25,051 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1085): writing seq id for 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:25,053 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T19:23:25,053 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1102): Opened 842a4f99d1015855e3e2b86470e8d61b; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70038083, jitterRate=0.04364876449108124}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T19:23:25,054 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1001): Region open journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:25,054 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., pid=102, masterSystemTime=1732303405039 2024-11-22T19:23:25,055 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:25,055 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
2024-11-22T19:23:25,056 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=842a4f99d1015855e3e2b86470e8d61b, regionState=OPEN, openSeqNum=2, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:23:25,058 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-11-22T19:23:25,058 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; OpenRegionProcedure 842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 in 169 msec 2024-11-22T19:23:25,059 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-11-22T19:23:25,059 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=842a4f99d1015855e3e2b86470e8d61b, ASSIGN in 323 msec 2024-11-22T19:23:25,059 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T19:23:25,060 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303405059"}]},"ts":"1732303405059"} 2024-11-22T19:23:25,060 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-22T19:23:25,062 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T19:23:25,063 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1480 sec 2024-11-22T19:23:26,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-22T19:23:26,020 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 100 completed 2024-11-22T19:23:26,021 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5fe71801 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@bf5e2f0 2024-11-22T19:23:26,025 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b82ba2a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:23:26,027 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:23:26,028 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41174, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:23:26,029 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T19:23:26,030 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53334, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T19:23:26,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-22T19:23:26,032 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T19:23:26,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=103, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-22T19:23:26,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742264_1440 (size=999) 2024-11-22T19:23:26,444 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-22T19:23:26,444 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-22T19:23:26,446 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=104, ppid=103, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-22T19:23:26,447 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=842a4f99d1015855e3e2b86470e8d61b, REOPEN/MOVE}] 2024-11-22T19:23:26,448 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=842a4f99d1015855e3e2b86470e8d61b, REOPEN/MOVE 2024-11-22T19:23:26,448 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=105 updating hbase:meta row=842a4f99d1015855e3e2b86470e8d61b, regionState=CLOSING, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:23:26,449 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T19:23:26,449 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=106, ppid=105, state=RUNNABLE; CloseRegionProcedure 842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657}] 2024-11-22T19:23:26,600 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:26,601 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] handler.UnassignRegionHandler(124): Close 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:26,601 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-22T19:23:26,601 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1681): Closing 842a4f99d1015855e3e2b86470e8d61b, disabling compactions & flushes 2024-11-22T19:23:26,601 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:26,601 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:26,601 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. after waiting 0 ms 2024-11-22T19:23:26,601 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
2024-11-22T19:23:26,604 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-22T19:23:26,605 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:26,605 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1635): Region close journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:26,605 WARN [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegionServer(3786): Not adding moved region record: 842a4f99d1015855e3e2b86470e8d61b to self. 2024-11-22T19:23:26,606 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] handler.UnassignRegionHandler(170): Closed 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:26,607 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=105 updating hbase:meta row=842a4f99d1015855e3e2b86470e8d61b, regionState=CLOSED 2024-11-22T19:23:26,608 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=106, resume processing ppid=105 2024-11-22T19:23:26,608 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, ppid=105, state=SUCCESS; CloseRegionProcedure 842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 in 158 msec 2024-11-22T19:23:26,609 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=105, ppid=104, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=842a4f99d1015855e3e2b86470e8d61b, REOPEN/MOVE; state=CLOSED, location=a307a1377457,35917,1732303314657; forceNewPlan=false, retain=true 2024-11-22T19:23:26,759 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=105 updating hbase:meta row=842a4f99d1015855e3e2b86470e8d61b, regionState=OPENING, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:23:26,761 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=105, state=RUNNABLE; OpenRegionProcedure 842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657}] 2024-11-22T19:23:26,912 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:26,915 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
2024-11-22T19:23:26,915 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(7285): Opening region: {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} 2024-11-22T19:23:26,916 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:26,916 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T19:23:26,916 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(7327): checking encryption for 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:26,916 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(7330): checking classloading for 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:26,917 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:26,918 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:23:26,918 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 842a4f99d1015855e3e2b86470e8d61b columnFamilyName A 2024-11-22T19:23:26,919 DEBUG [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:26,919 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] regionserver.HStore(327): Store=842a4f99d1015855e3e2b86470e8d61b/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:23:26,920 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:26,920 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:23:26,921 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 842a4f99d1015855e3e2b86470e8d61b columnFamilyName B 2024-11-22T19:23:26,921 DEBUG [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:26,921 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] regionserver.HStore(327): Store=842a4f99d1015855e3e2b86470e8d61b/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:23:26,921 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:26,922 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:23:26,922 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 842a4f99d1015855e3e2b86470e8d61b columnFamilyName C 2024-11-22T19:23:26,922 DEBUG [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:26,923 INFO [StoreOpener-842a4f99d1015855e3e2b86470e8d61b-1 {}] regionserver.HStore(327): Store=842a4f99d1015855e3e2b86470e8d61b/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:23:26,923 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:26,923 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:26,924 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:26,925 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T19:23:26,927 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(1085): writing seq id for 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:26,928 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(1102): Opened 842a4f99d1015855e3e2b86470e8d61b; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69258493, jitterRate=0.03203196823596954}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T19:23:26,929 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(1001): Region open journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:26,929 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., pid=107, masterSystemTime=1732303406912 2024-11-22T19:23:26,931 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:26,931 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
2024-11-22T19:23:26,931 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=105 updating hbase:meta row=842a4f99d1015855e3e2b86470e8d61b, regionState=OPEN, openSeqNum=5, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:23:26,933 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=105 2024-11-22T19:23:26,933 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=105, state=SUCCESS; OpenRegionProcedure 842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 in 172 msec 2024-11-22T19:23:26,935 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-11-22T19:23:26,935 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=842a4f99d1015855e3e2b86470e8d61b, REOPEN/MOVE in 486 msec 2024-11-22T19:23:26,937 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=104, resume processing ppid=103 2024-11-22T19:23:26,937 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, ppid=103, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 490 msec 2024-11-22T19:23:26,939 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 905 msec 2024-11-22T19:23:26,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=103 2024-11-22T19:23:26,941 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51f7d511 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@75b14fbd 2024-11-22T19:23:26,949 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b6cf8cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:23:26,950 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1dc42ea6 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62f74604 2024-11-22T19:23:26,953 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ec15031, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:23:26,954 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x117e86d9 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@49e13594 2024-11-22T19:23:26,958 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3dd5b441, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:23:26,959 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x6cd96549 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c54a0d3 2024-11-22T19:23:26,962 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c336ea4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:23:26,963 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x31aea41b to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3875c8c5 2024-11-22T19:23:26,966 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f94d721, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:23:26,966 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0801ba40 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@319559be 2024-11-22T19:23:26,969 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f49665c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:23:26,969 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x27539bdc to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c907e21 2024-11-22T19:23:26,974 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@683f8469, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:23:26,975 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e3203d9 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@61ec0f48 2024-11-22T19:23:26,977 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75e4d3d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:23:26,978 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x798e7fd4 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7819b9e2 2024-11-22T19:23:26,982 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b308f62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:23:26,982 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7284f16d to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47679076 2024-11-22T19:23:26,988 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68035c67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:23:26,995 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:23:26,995 DEBUG [hconnection-0x5f73ade1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:23:26,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-11-22T19:23:26,996 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41180, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:23:26,996 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:23:26,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-22T19:23:26,997 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:23:26,997 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:23:27,000 DEBUG [hconnection-0x2e4c8b1a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:23:27,000 DEBUG [hconnection-0x537067bc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:23:27,001 DEBUG [hconnection-0x4c9fb017-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:23:27,001 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41194, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:23:27,001 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41190, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:23:27,002 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41198, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:23:27,005 DEBUG [hconnection-0x1df4f30d-metaLookup-shared--pool-0 {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:23:27,006 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41210, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:23:27,006 DEBUG [hconnection-0x1b5e2a64-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:23:27,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:27,007 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 842a4f99d1015855e3e2b86470e8d61b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T19:23:27,007 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=A 2024-11-22T19:23:27,007 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:27,007 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=B 2024-11-22T19:23:27,007 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:27,007 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=C 2024-11-22T19:23:27,007 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:27,008 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41222, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:23:27,018 DEBUG [hconnection-0x2fa9eaed-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:23:27,022 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41236, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:23:27,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:27,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303467028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:27,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:27,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303467028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:27,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:27,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303467029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:27,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:27,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303467030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:27,032 DEBUG [hconnection-0x28c7b703-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:23:27,032 DEBUG [hconnection-0x7b7d89e9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:23:27,033 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41252, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:23:27,033 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41266, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:23:27,036 DEBUG [hconnection-0x6a769da-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:23:27,045 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41272, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:23:27,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:27,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303467046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:27,055 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411222c06254588c545b78d43f0c43117ab99_842a4f99d1015855e3e2b86470e8d61b is 50, key is test_row_0/A:col10/1732303407004/Put/seqid=0 2024-11-22T19:23:27,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742265_1441 (size=9714) 2024-11-22T19:23:27,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-22T19:23:27,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:27,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303467131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:27,132 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:27,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303467131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:27,132 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:27,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303467131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:27,132 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:27,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303467131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:27,149 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:27,149 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-22T19:23:27,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:27,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:27,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:27,150 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:23:27,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:27,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:27,151 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:27,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303467148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:27,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-22T19:23:27,306 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:27,306 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-22T19:23:27,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:27,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:27,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
2024-11-22T19:23:27,307 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:27,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:27,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:27,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:27,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303467333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:27,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:27,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303467334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:27,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:27,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303467334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:27,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:27,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303467334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:27,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:27,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303467352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:27,459 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:27,459 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-22T19:23:27,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:27,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:27,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:27,459 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:27,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:27,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:27,472 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:27,476 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411222c06254588c545b78d43f0c43117ab99_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411222c06254588c545b78d43f0c43117ab99_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:27,477 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/f5258b624a9e40d5846cb3c92ba5f29a, store: [table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:27,477 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/f5258b624a9e40d5846cb3c92ba5f29a is 175, key is test_row_0/A:col10/1732303407004/Put/seqid=0 2024-11-22T19:23:27,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742266_1442 (size=22361) 2024-11-22T19:23:27,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-22T19:23:27,612 DEBUG [RSProcedureDispatcher-pool-2 {}] 
master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:27,612 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-22T19:23:27,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:27,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:27,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:27,612 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:27,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:23:27,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:27,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:27,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303467639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:27,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:27,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303467639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:27,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:27,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303467639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:27,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:27,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303467640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:27,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:27,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303467657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:27,764 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:27,765 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-22T19:23:27,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:27,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:27,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:27,765 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:27,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:27,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:27,885 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/f5258b624a9e40d5846cb3c92ba5f29a 2024-11-22T19:23:27,909 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/4f955b6a65a443eab6c632dc2a714ed9 is 50, key is test_row_0/B:col10/1732303407004/Put/seqid=0 2024-11-22T19:23:27,918 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:27,918 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-22T19:23:27,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:27,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:27,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
2024-11-22T19:23:27,919 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:27,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:27,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:27,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742267_1443 (size=9657) 2024-11-22T19:23:27,924 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/4f955b6a65a443eab6c632dc2a714ed9 2024-11-22T19:23:27,952 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/ba136914ca7348f092beaba699871ec0 is 50, key is test_row_0/C:col10/1732303407004/Put/seqid=0 2024-11-22T19:23:27,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742268_1444 (size=9657) 2024-11-22T19:23:27,956 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/ba136914ca7348f092beaba699871ec0 2024-11-22T19:23:27,962 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/f5258b624a9e40d5846cb3c92ba5f29a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/f5258b624a9e40d5846cb3c92ba5f29a 2024-11-22T19:23:27,966 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/f5258b624a9e40d5846cb3c92ba5f29a, entries=100, sequenceid=15, filesize=21.8 K 2024-11-22T19:23:27,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/4f955b6a65a443eab6c632dc2a714ed9 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/4f955b6a65a443eab6c632dc2a714ed9 2024-11-22T19:23:27,974 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/4f955b6a65a443eab6c632dc2a714ed9, entries=100, sequenceid=15, filesize=9.4 K 2024-11-22T19:23:27,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/ba136914ca7348f092beaba699871ec0 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/ba136914ca7348f092beaba699871ec0 2024-11-22T19:23:27,979 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/ba136914ca7348f092beaba699871ec0, entries=100, sequenceid=15, filesize=9.4 K 2024-11-22T19:23:27,980 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for 842a4f99d1015855e3e2b86470e8d61b in 973ms, sequenceid=15, compaction requested=false 2024-11-22T19:23:27,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:28,071 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:28,071 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-22T19:23:28,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
2024-11-22T19:23:28,072 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 842a4f99d1015855e3e2b86470e8d61b 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-22T19:23:28,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=A 2024-11-22T19:23:28,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:28,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=B 2024-11-22T19:23:28,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:28,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=C 2024-11-22T19:23:28,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:28,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122864846e6b90341ee87ccb5af9240cecb_842a4f99d1015855e3e2b86470e8d61b is 50, key is test_row_0/A:col10/1732303407026/Put/seqid=0 2024-11-22T19:23:28,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-22T19:23:28,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742269_1445 (size=12154) 2024-11-22T19:23:28,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:28,127 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122864846e6b90341ee87ccb5af9240cecb_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122864846e6b90341ee87ccb5af9240cecb_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:28,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/d8eb3419cdf74f10b3a3c39f654c3da0, store: [table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:28,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/d8eb3419cdf74f10b3a3c39f654c3da0 is 175, key is test_row_0/A:col10/1732303407026/Put/seqid=0 2024-11-22T19:23:28,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742270_1446 (size=30955) 2024-11-22T19:23:28,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:28,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:28,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:28,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303468150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:28,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:28,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303468150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:28,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:28,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303468151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:28,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:28,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303468151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:28,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:28,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303468161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:28,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:28,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303468254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:28,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:28,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303468254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:28,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:28,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303468257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:28,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:28,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303468257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:28,462 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:28,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303468458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:28,462 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:28,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303468459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:28,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:28,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303468460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:28,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:28,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303468460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:28,536 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/d8eb3419cdf74f10b3a3c39f654c3da0 2024-11-22T19:23:28,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/a22f677b78eb48c5af6d678d73ce7807 is 50, key is test_row_0/B:col10/1732303407026/Put/seqid=0 2024-11-22T19:23:28,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742271_1447 (size=12001) 2024-11-22T19:23:28,572 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/a22f677b78eb48c5af6d678d73ce7807 2024-11-22T19:23:28,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/0930dbc67d024977b555abe1368cabfe is 50, key is test_row_0/C:col10/1732303407026/Put/seqid=0 2024-11-22T19:23:28,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742272_1448 (size=12001) 2024-11-22T19:23:28,744 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-22T19:23:28,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:28,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303468764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:28,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:28,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303468764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:28,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:28,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303468766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:28,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:28,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303468766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:28,996 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/0930dbc67d024977b555abe1368cabfe 2024-11-22T19:23:29,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/d8eb3419cdf74f10b3a3c39f654c3da0 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/d8eb3419cdf74f10b3a3c39f654c3da0 2024-11-22T19:23:29,006 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/d8eb3419cdf74f10b3a3c39f654c3da0, entries=150, sequenceid=42, filesize=30.2 K 2024-11-22T19:23:29,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/a22f677b78eb48c5af6d678d73ce7807 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/a22f677b78eb48c5af6d678d73ce7807 2024-11-22T19:23:29,014 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/a22f677b78eb48c5af6d678d73ce7807, entries=150, sequenceid=42, filesize=11.7 K 2024-11-22T19:23:29,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/0930dbc67d024977b555abe1368cabfe as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/0930dbc67d024977b555abe1368cabfe 2024-11-22T19:23:29,022 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/0930dbc67d024977b555abe1368cabfe, entries=150, sequenceid=42, filesize=11.7 K 2024-11-22T19:23:29,023 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 842a4f99d1015855e3e2b86470e8d61b in 951ms, sequenceid=42, compaction requested=false 2024-11-22T19:23:29,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:29,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
2024-11-22T19:23:29,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-11-22T19:23:29,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-11-22T19:23:29,026 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-11-22T19:23:29,026 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0270 sec 2024-11-22T19:23:29,028 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 2.0310 sec 2024-11-22T19:23:29,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-22T19:23:29,101 INFO [Thread-1947 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-11-22T19:23:29,103 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:23:29,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-11-22T19:23:29,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-22T19:23:29,104 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:23:29,105 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:23:29,105 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:23:29,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:29,176 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 842a4f99d1015855e3e2b86470e8d61b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T19:23:29,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=A 2024-11-22T19:23:29,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:29,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=B 2024-11-22T19:23:29,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-22T19:23:29,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=C 2024-11-22T19:23:29,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:29,197 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411227dfa3c9a62dd4f5c9db88969556ffda6_842a4f99d1015855e3e2b86470e8d61b is 50, key is test_row_0/A:col10/1732303409165/Put/seqid=0 2024-11-22T19:23:29,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-22T19:23:29,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742273_1449 (size=14594) 2024-11-22T19:23:29,257 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:29,258 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T19:23:29,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:29,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:29,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:29,258 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:23:29,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:29,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:29,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:29,286 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:29,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303469277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:29,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303469276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:29,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:29,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303469278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:29,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:29,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303469279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:29,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:29,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303469284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:29,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:29,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303469387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:29,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:29,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303469387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:29,395 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:29,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303469387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:29,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:29,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303469387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:29,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:29,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303469391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:29,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-22T19:23:29,411 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:29,412 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T19:23:29,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:29,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:29,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:29,412 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:23:29,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:29,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:29,566 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:29,566 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T19:23:29,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:29,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:29,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:29,567 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:29,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:29,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:29,600 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:29,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303469596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:29,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:29,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303469596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:29,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:29,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303469596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:29,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:29,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303469598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:29,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:29,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303469603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:29,618 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:29,626 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411227dfa3c9a62dd4f5c9db88969556ffda6_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411227dfa3c9a62dd4f5c9db88969556ffda6_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:29,627 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/684c292c149a49d2b4fab8329b9bc6ad, store: [table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:29,627 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/684c292c149a49d2b4fab8329b9bc6ad is 175, key is test_row_0/A:col10/1732303409165/Put/seqid=0 2024-11-22T19:23:29,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742274_1450 (size=39549) 2024-11-22T19:23:29,657 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=17.9 
K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/684c292c149a49d2b4fab8329b9bc6ad 2024-11-22T19:23:29,666 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/c7cbfa707e344db0954a2da62c50637b is 50, key is test_row_0/B:col10/1732303409165/Put/seqid=0 2024-11-22T19:23:29,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742275_1451 (size=12001) 2024-11-22T19:23:29,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-22T19:23:29,721 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:29,721 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T19:23:29,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:29,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:29,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:29,722 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:23:29,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:29,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:29,874 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:29,874 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T19:23:29,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:29,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:29,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:29,875 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:29,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:29,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:29,903 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:29,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303469902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:29,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:29,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303469904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:29,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:29,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303469904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:29,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:29,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303469906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:29,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:29,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303469910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:30,027 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:30,028 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T19:23:30,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:30,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:30,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:30,028 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:30,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:30,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:30,093 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/c7cbfa707e344db0954a2da62c50637b 2024-11-22T19:23:30,107 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/8c6e2c8e04e142208fd0408d91262b44 is 50, key is test_row_0/C:col10/1732303409165/Put/seqid=0 2024-11-22T19:23:30,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742276_1452 (size=12001) 2024-11-22T19:23:30,180 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:30,180 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T19:23:30,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:30,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
as already flushing 2024-11-22T19:23:30,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:30,181 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:30,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:30,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:30,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-22T19:23:30,333 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:30,333 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T19:23:30,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:30,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:30,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:30,334 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:30,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:30,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:30,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:30,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303470409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:30,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:30,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303470410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:30,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:30,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303470412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:30,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:30,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303470413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:30,425 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:30,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303470419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:30,486 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:30,487 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T19:23:30,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:30,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:30,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:30,487 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:30,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:30,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:30,533 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/8c6e2c8e04e142208fd0408d91262b44 2024-11-22T19:23:30,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/684c292c149a49d2b4fab8329b9bc6ad as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/684c292c149a49d2b4fab8329b9bc6ad 2024-11-22T19:23:30,542 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/684c292c149a49d2b4fab8329b9bc6ad, entries=200, sequenceid=53, filesize=38.6 K 2024-11-22T19:23:30,544 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/c7cbfa707e344db0954a2da62c50637b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/c7cbfa707e344db0954a2da62c50637b 2024-11-22T19:23:30,547 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/c7cbfa707e344db0954a2da62c50637b, entries=150, sequenceid=53, 
filesize=11.7 K 2024-11-22T19:23:30,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/8c6e2c8e04e142208fd0408d91262b44 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/8c6e2c8e04e142208fd0408d91262b44 2024-11-22T19:23:30,552 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/8c6e2c8e04e142208fd0408d91262b44, entries=150, sequenceid=53, filesize=11.7 K 2024-11-22T19:23:30,553 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 842a4f99d1015855e3e2b86470e8d61b in 1377ms, sequenceid=53, compaction requested=true 2024-11-22T19:23:30,553 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:30,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 842a4f99d1015855e3e2b86470e8d61b:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:23:30,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:30,553 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:30,554 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:30,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 842a4f99d1015855e3e2b86470e8d61b:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:23:30,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:30,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 842a4f99d1015855e3e2b86470e8d61b:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:23:30,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:30,555 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:30,555 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:30,555 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 
842a4f99d1015855e3e2b86470e8d61b/A is initiating minor compaction (all files) 2024-11-22T19:23:30,555 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 842a4f99d1015855e3e2b86470e8d61b/B is initiating minor compaction (all files) 2024-11-22T19:23:30,555 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 842a4f99d1015855e3e2b86470e8d61b/A in TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:30,555 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 842a4f99d1015855e3e2b86470e8d61b/B in TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:30,555 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/f5258b624a9e40d5846cb3c92ba5f29a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/d8eb3419cdf74f10b3a3c39f654c3da0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/684c292c149a49d2b4fab8329b9bc6ad] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp, totalSize=90.7 K 2024-11-22T19:23:30,555 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/4f955b6a65a443eab6c632dc2a714ed9, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/a22f677b78eb48c5af6d678d73ce7807, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/c7cbfa707e344db0954a2da62c50637b] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp, totalSize=32.9 K 2024-11-22T19:23:30,555 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:30,555 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
files: [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/f5258b624a9e40d5846cb3c92ba5f29a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/d8eb3419cdf74f10b3a3c39f654c3da0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/684c292c149a49d2b4fab8329b9bc6ad] 2024-11-22T19:23:30,555 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f955b6a65a443eab6c632dc2a714ed9, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732303407004 2024-11-22T19:23:30,555 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5258b624a9e40d5846cb3c92ba5f29a, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732303407004 2024-11-22T19:23:30,556 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting a22f677b78eb48c5af6d678d73ce7807, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732303407026 2024-11-22T19:23:30,556 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8eb3419cdf74f10b3a3c39f654c3da0, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732303407026 2024-11-22T19:23:30,556 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting c7cbfa707e344db0954a2da62c50637b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732303408150 2024-11-22T19:23:30,557 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 684c292c149a49d2b4fab8329b9bc6ad, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732303408149 2024-11-22T19:23:30,566 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 842a4f99d1015855e3e2b86470e8d61b#B#compaction#392 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:30,566 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/b91bbc4967f3418bbdf3058ef2edcd94 is 50, key is test_row_0/B:col10/1732303409165/Put/seqid=0 2024-11-22T19:23:30,569 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:30,572 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411229269ffc0f1594bca9a62750ae09dab9b_842a4f99d1015855e3e2b86470e8d61b store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:30,574 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411229269ffc0f1594bca9a62750ae09dab9b_842a4f99d1015855e3e2b86470e8d61b, store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:30,574 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411229269ffc0f1594bca9a62750ae09dab9b_842a4f99d1015855e3e2b86470e8d61b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:30,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742277_1453 (size=12104) 2024-11-22T19:23:30,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742278_1454 (size=4469) 2024-11-22T19:23:30,639 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:30,640 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T19:23:30,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
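The repeated RegionTooBusyException entries earlier in this stretch show client mutations being rejected with "Over memstore limit=512.0 K" while pid=111 keeps retrying the region flush. As an illustrative sketch only (not something this test itself runs), a client hitting the same condition could catch the exception and retry with backoff. The table name, row key, and column family below are copied from the log above; the retry policy, loop bounds, and cell value are assumptions made purely for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);   // may throw RegionTooBusyException while the memstore is over its blocking limit
                    break;            // write accepted once the flush drains the memstore below the limit
                } catch (RegionTooBusyException busy) {
                    // Region is above its blocking memstore size; back off and let the flush catch up.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}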
2024-11-22T19:23:30,640 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 842a4f99d1015855e3e2b86470e8d61b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T19:23:30,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=A 2024-11-22T19:23:30,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:30,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=B 2024-11-22T19:23:30,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:30,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=C 2024-11-22T19:23:30,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:30,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122acee0e57b3b54930b03bc953a87c32d9_842a4f99d1015855e3e2b86470e8d61b is 50, key is test_row_0/A:col10/1732303409275/Put/seqid=0 2024-11-22T19:23:30,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742279_1455 (size=12154) 2024-11-22T19:23:30,986 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/b91bbc4967f3418bbdf3058ef2edcd94 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/b91bbc4967f3418bbdf3058ef2edcd94 2024-11-22T19:23:30,990 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 842a4f99d1015855e3e2b86470e8d61b/B of 842a4f99d1015855e3e2b86470e8d61b into b91bbc4967f3418bbdf3058ef2edcd94(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
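The numbers in the surrounding entries (the 512.0 K blocking limit, "3 eligible, 16 blocking" in the compaction selection, and the flush landing at sequenceid=53 with "compaction requested=true") all trace back to a small set of region-server settings. The sketch below only illustrates which knobs are involved; the values shown are the stock defaults, not values read from this run's configuration, and the much smaller 512 K limit seen above presumably comes from test-specific overrides.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FlushAndCompactionTuning {
    public static Configuration exampleConf() {
        Configuration conf = HBaseConfiguration.create();
        // Size at which a region's memstore is flushed to a new HFile.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Writes are rejected with RegionTooBusyException once the memstore reaches
        // flush.size * block.multiplier (the "Over memstore limit" seen in this log).
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        // Minimum number of store files before a minor compaction is considered
        // ("3 eligible" in the selection messages above).
        conf.setInt("hbase.hstore.compactionThreshold", 3);
        // Store-file count at which further flushes are delayed ("16 blocking").
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        return conf;
    }
}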
2024-11-22T19:23:30,991 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:30,991 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., storeName=842a4f99d1015855e3e2b86470e8d61b/B, priority=13, startTime=1732303410554; duration=0sec 2024-11-22T19:23:30,991 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:30,991 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 842a4f99d1015855e3e2b86470e8d61b:B 2024-11-22T19:23:30,991 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:30,992 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:30,992 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 842a4f99d1015855e3e2b86470e8d61b/C is initiating minor compaction (all files) 2024-11-22T19:23:30,992 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 842a4f99d1015855e3e2b86470e8d61b/C in TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:30,992 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/ba136914ca7348f092beaba699871ec0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/0930dbc67d024977b555abe1368cabfe, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/8c6e2c8e04e142208fd0408d91262b44] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp, totalSize=32.9 K 2024-11-22T19:23:30,992 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting ba136914ca7348f092beaba699871ec0, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732303407004 2024-11-22T19:23:30,993 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 0930dbc67d024977b555abe1368cabfe, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732303407026 2024-11-22T19:23:30,993 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c6e2c8e04e142208fd0408d91262b44, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732303408150 2024-11-22T19:23:30,995 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
842a4f99d1015855e3e2b86470e8d61b#A#compaction#393 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:30,995 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/dd09bcaac20e41fb8b00e6bcd6966edd is 175, key is test_row_0/A:col10/1732303409165/Put/seqid=0 2024-11-22T19:23:31,011 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 842a4f99d1015855e3e2b86470e8d61b#C#compaction#395 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:31,011 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/95ce74e6c6bc4036b99d47f14904b669 is 50, key is test_row_0/C:col10/1732303409165/Put/seqid=0 2024-11-22T19:23:31,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742280_1456 (size=31058) 2024-11-22T19:23:31,032 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/dd09bcaac20e41fb8b00e6bcd6966edd as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/dd09bcaac20e41fb8b00e6bcd6966edd 2024-11-22T19:23:31,041 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 842a4f99d1015855e3e2b86470e8d61b/A of 842a4f99d1015855e3e2b86470e8d61b into dd09bcaac20e41fb8b00e6bcd6966edd(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:23:31,041 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:31,041 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., storeName=842a4f99d1015855e3e2b86470e8d61b/A, priority=13, startTime=1732303410553; duration=0sec 2024-11-22T19:23:31,041 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:31,041 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 842a4f99d1015855e3e2b86470e8d61b:A 2024-11-22T19:23:31,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742281_1457 (size=12104) 2024-11-22T19:23:31,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:31,071 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122acee0e57b3b54930b03bc953a87c32d9_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122acee0e57b3b54930b03bc953a87c32d9_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:31,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/e1d3b4b174c74493a5422b7932c8529e, store: [table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:31,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/e1d3b4b174c74493a5422b7932c8529e is 175, key is test_row_0/A:col10/1732303409275/Put/seqid=0 2024-11-22T19:23:31,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742282_1458 (size=30955) 2024-11-22T19:23:31,106 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/e1d3b4b174c74493a5422b7932c8529e 2024-11-22T19:23:31,115 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/a5f9f1bdebba46739aa80d167c46f104 is 50, key is test_row_0/B:col10/1732303409275/Put/seqid=0 2024-11-22T19:23:31,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742283_1459 (size=12001) 2024-11-22T19:23:31,141 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/a5f9f1bdebba46739aa80d167c46f104 2024-11-22T19:23:31,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/c9cd5cf8604542ec9584def83108d8f4 is 50, key is test_row_0/C:col10/1732303409275/Put/seqid=0 2024-11-22T19:23:31,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742284_1460 (size=12001) 2024-11-22T19:23:31,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-22T19:23:31,421 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:31,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:31,436 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:31,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303471428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:31,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:31,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303471433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:31,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:31,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303471435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:31,443 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:31,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303471435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:31,444 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:31,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303471436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:31,454 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/95ce74e6c6bc4036b99d47f14904b669 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/95ce74e6c6bc4036b99d47f14904b669 2024-11-22T19:23:31,460 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 842a4f99d1015855e3e2b86470e8d61b/C of 842a4f99d1015855e3e2b86470e8d61b into 95ce74e6c6bc4036b99d47f14904b669(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:23:31,460 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:31,460 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., storeName=842a4f99d1015855e3e2b86470e8d61b/C, priority=13, startTime=1732303410554; duration=0sec 2024-11-22T19:23:31,460 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:31,460 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 842a4f99d1015855e3e2b86470e8d61b:C 2024-11-22T19:23:31,540 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:31,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303471538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:31,546 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:31,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303471542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:31,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:31,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303471547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:31,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:31,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303471548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:31,563 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/c9cd5cf8604542ec9584def83108d8f4 2024-11-22T19:23:31,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/e1d3b4b174c74493a5422b7932c8529e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/e1d3b4b174c74493a5422b7932c8529e 2024-11-22T19:23:31,571 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/e1d3b4b174c74493a5422b7932c8529e, entries=150, sequenceid=78, filesize=30.2 K 2024-11-22T19:23:31,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/a5f9f1bdebba46739aa80d167c46f104 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/a5f9f1bdebba46739aa80d167c46f104 2024-11-22T19:23:31,576 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/a5f9f1bdebba46739aa80d167c46f104, entries=150, sequenceid=78, filesize=11.7 K 2024-11-22T19:23:31,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/c9cd5cf8604542ec9584def83108d8f4 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/c9cd5cf8604542ec9584def83108d8f4 2024-11-22T19:23:31,581 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/c9cd5cf8604542ec9584def83108d8f4, entries=150, sequenceid=78, filesize=11.7 K 2024-11-22T19:23:31,581 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 842a4f99d1015855e3e2b86470e8d61b in 941ms, sequenceid=78, compaction requested=false 2024-11-22T19:23:31,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:31,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
2024-11-22T19:23:31,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-11-22T19:23:31,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-11-22T19:23:31,584 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-11-22T19:23:31,584 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4780 sec 2024-11-22T19:23:31,586 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 2.4820 sec 2024-11-22T19:23:31,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:31,749 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 842a4f99d1015855e3e2b86470e8d61b 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-22T19:23:31,750 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=A 2024-11-22T19:23:31,750 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:31,750 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=B 2024-11-22T19:23:31,750 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:31,750 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=C 2024-11-22T19:23:31,750 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:31,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411226304b83ebf4142e4aa21614373cde96e_842a4f99d1015855e3e2b86470e8d61b is 50, key is test_row_0/A:col10/1732303411435/Put/seqid=0 2024-11-22T19:23:31,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742285_1461 (size=12154) 2024-11-22T19:23:31,775 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:31,778 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411226304b83ebf4142e4aa21614373cde96e_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411226304b83ebf4142e4aa21614373cde96e_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:31,779 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/c1dd4cf3eadd4a8a8c24f5c1319c4743, store: [table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:31,780 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/c1dd4cf3eadd4a8a8c24f5c1319c4743 is 175, key is test_row_0/A:col10/1732303411435/Put/seqid=0 2024-11-22T19:23:31,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742286_1462 (size=30955) 2024-11-22T19:23:31,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:31,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303471794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:31,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:31,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303471795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:31,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:31,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303471796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:31,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:31,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303471803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:31,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:31,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303471904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:31,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:31,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303471904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:31,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:31,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303471904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:31,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:31,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303471910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:32,113 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:32,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303472110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:32,114 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:32,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303472110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:32,114 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:32,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303472111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:32,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:32,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303472115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:32,185 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/c1dd4cf3eadd4a8a8c24f5c1319c4743 2024-11-22T19:23:32,192 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/cf6977825bab4b6ba9c631c325c420b4 is 50, key is test_row_0/B:col10/1732303411435/Put/seqid=0 2024-11-22T19:23:32,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742287_1463 (size=12001) 2024-11-22T19:23:32,206 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/cf6977825bab4b6ba9c631c325c420b4 2024-11-22T19:23:32,212 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/86d6e62ef9324de6b755e162a5ad49dd is 50, key is test_row_0/C:col10/1732303411435/Put/seqid=0 2024-11-22T19:23:32,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742288_1464 (size=12001) 2024-11-22T19:23:32,420 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:32,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303472416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:32,421 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:32,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303472416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:32,421 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:32,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303472417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:32,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:32,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303472424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:32,617 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/86d6e62ef9324de6b755e162a5ad49dd 2024-11-22T19:23:32,621 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/c1dd4cf3eadd4a8a8c24f5c1319c4743 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/c1dd4cf3eadd4a8a8c24f5c1319c4743 2024-11-22T19:23:32,624 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/c1dd4cf3eadd4a8a8c24f5c1319c4743, entries=150, sequenceid=93, filesize=30.2 K 2024-11-22T19:23:32,625 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/cf6977825bab4b6ba9c631c325c420b4 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/cf6977825bab4b6ba9c631c325c420b4 2024-11-22T19:23:32,644 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/cf6977825bab4b6ba9c631c325c420b4, entries=150, sequenceid=93, filesize=11.7 K 2024-11-22T19:23:32,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/86d6e62ef9324de6b755e162a5ad49dd as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/86d6e62ef9324de6b755e162a5ad49dd 2024-11-22T19:23:32,654 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/86d6e62ef9324de6b755e162a5ad49dd, entries=150, sequenceid=93, filesize=11.7 K 2024-11-22T19:23:32,655 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 842a4f99d1015855e3e2b86470e8d61b in 906ms, sequenceid=93, compaction requested=true 2024-11-22T19:23:32,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:32,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 842a4f99d1015855e3e2b86470e8d61b:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:23:32,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:32,655 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:32,655 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:32,656 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:32,657 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 842a4f99d1015855e3e2b86470e8d61b/A is initiating minor compaction (all files) 2024-11-22T19:23:32,657 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 842a4f99d1015855e3e2b86470e8d61b/A in TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:32,657 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/dd09bcaac20e41fb8b00e6bcd6966edd, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/e1d3b4b174c74493a5422b7932c8529e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/c1dd4cf3eadd4a8a8c24f5c1319c4743] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp, totalSize=90.8 K 2024-11-22T19:23:32,657 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
2024-11-22T19:23:32,657 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. files: [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/dd09bcaac20e41fb8b00e6bcd6966edd, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/e1d3b4b174c74493a5422b7932c8529e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/c1dd4cf3eadd4a8a8c24f5c1319c4743] 2024-11-22T19:23:32,657 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:32,657 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 842a4f99d1015855e3e2b86470e8d61b/B is initiating minor compaction (all files) 2024-11-22T19:23:32,657 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd09bcaac20e41fb8b00e6bcd6966edd, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732303408150 2024-11-22T19:23:32,657 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 842a4f99d1015855e3e2b86470e8d61b/B in TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:32,657 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/b91bbc4967f3418bbdf3058ef2edcd94, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/a5f9f1bdebba46739aa80d167c46f104, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/cf6977825bab4b6ba9c631c325c420b4] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp, totalSize=35.3 K 2024-11-22T19:23:32,658 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e1d3b4b174c74493a5422b7932c8529e, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732303409275 2024-11-22T19:23:32,658 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting b91bbc4967f3418bbdf3058ef2edcd94, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732303408150 2024-11-22T19:23:32,658 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting a5f9f1bdebba46739aa80d167c46f104, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732303409275 2024-11-22T19:23:32,658 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting c1dd4cf3eadd4a8a8c24f5c1319c4743, keycount=150, 
bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732303411433 2024-11-22T19:23:32,659 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting cf6977825bab4b6ba9c631c325c420b4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732303411433 2024-11-22T19:23:32,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 842a4f99d1015855e3e2b86470e8d61b:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:23:32,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:32,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 842a4f99d1015855e3e2b86470e8d61b:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:23:32,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:32,670 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 842a4f99d1015855e3e2b86470e8d61b#B#compaction#401 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:32,671 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/9a63d66dd9784fba80698bed604ae981 is 50, key is test_row_0/B:col10/1732303411435/Put/seqid=0 2024-11-22T19:23:32,676 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:32,692 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112260d01bd03cd34c4faeee28afd7277bf5_842a4f99d1015855e3e2b86470e8d61b store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:32,694 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112260d01bd03cd34c4faeee28afd7277bf5_842a4f99d1015855e3e2b86470e8d61b, store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:32,694 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112260d01bd03cd34c4faeee28afd7277bf5_842a4f99d1015855e3e2b86470e8d61b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:32,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742289_1465 (size=12207) 2024-11-22T19:23:32,714 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742290_1466 (size=4469) 2024-11-22T19:23:32,715 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 842a4f99d1015855e3e2b86470e8d61b#A#compaction#402 average throughput is 0.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:32,716 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/463379ff8cf44ef69b8dd0cc80dc9073 is 175, key is test_row_0/A:col10/1732303411435/Put/seqid=0 2024-11-22T19:23:32,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742291_1467 (size=31161) 2024-11-22T19:23:32,748 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/463379ff8cf44ef69b8dd0cc80dc9073 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/463379ff8cf44ef69b8dd0cc80dc9073 2024-11-22T19:23:32,755 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 842a4f99d1015855e3e2b86470e8d61b/A of 842a4f99d1015855e3e2b86470e8d61b into 463379ff8cf44ef69b8dd0cc80dc9073(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:23:32,755 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:32,755 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., storeName=842a4f99d1015855e3e2b86470e8d61b/A, priority=13, startTime=1732303412655; duration=0sec 2024-11-22T19:23:32,755 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:32,755 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 842a4f99d1015855e3e2b86470e8d61b:A 2024-11-22T19:23:32,755 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:32,756 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:32,756 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 842a4f99d1015855e3e2b86470e8d61b/C is initiating minor compaction (all files) 2024-11-22T19:23:32,757 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 842a4f99d1015855e3e2b86470e8d61b/C in TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:32,757 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/95ce74e6c6bc4036b99d47f14904b669, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/c9cd5cf8604542ec9584def83108d8f4, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/86d6e62ef9324de6b755e162a5ad49dd] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp, totalSize=35.3 K 2024-11-22T19:23:32,758 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95ce74e6c6bc4036b99d47f14904b669, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732303408150 2024-11-22T19:23:32,758 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting c9cd5cf8604542ec9584def83108d8f4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732303409275 2024-11-22T19:23:32,758 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 86d6e62ef9324de6b755e162a5ad49dd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732303411433 2024-11-22T19:23:32,777 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 842a4f99d1015855e3e2b86470e8d61b#C#compaction#403 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:32,778 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/4143f7a0f8374557a551d5f538481962 is 50, key is test_row_0/C:col10/1732303411435/Put/seqid=0 2024-11-22T19:23:32,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742292_1468 (size=12207) 2024-11-22T19:23:32,826 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/4143f7a0f8374557a551d5f538481962 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/4143f7a0f8374557a551d5f538481962 2024-11-22T19:23:32,831 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 842a4f99d1015855e3e2b86470e8d61b/C of 842a4f99d1015855e3e2b86470e8d61b into 4143f7a0f8374557a551d5f538481962(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:23:32,831 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:32,831 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., storeName=842a4f99d1015855e3e2b86470e8d61b/C, priority=13, startTime=1732303412660; duration=0sec 2024-11-22T19:23:32,831 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:32,831 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 842a4f99d1015855e3e2b86470e8d61b:C 2024-11-22T19:23:32,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:32,932 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 842a4f99d1015855e3e2b86470e8d61b 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-22T19:23:32,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=A 2024-11-22T19:23:32,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:32,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=B 2024-11-22T19:23:32,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:32,934 DEBUG [MemStoreFlusher.0 
{}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=C 2024-11-22T19:23:32,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:32,943 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122a3650cc9a6b842a8a94ee804c73facf5_842a4f99d1015855e3e2b86470e8d61b is 50, key is test_row_0/A:col10/1732303411794/Put/seqid=0 2024-11-22T19:23:32,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:32,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303472944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:32,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:32,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303472944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:32,951 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:32,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303472947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:32,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:32,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303472950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:32,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742293_1469 (size=14594) 2024-11-22T19:23:32,985 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:32,990 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122a3650cc9a6b842a8a94ee804c73facf5_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a3650cc9a6b842a8a94ee804c73facf5_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:32,991 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/159ad8fa3986436aa0db8d2e65be0809, store: [table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:32,994 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/159ad8fa3986436aa0db8d2e65be0809 is 175, key is test_row_0/A:col10/1732303411794/Put/seqid=0 2024-11-22T19:23:33,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742294_1470 (size=39549) 2024-11-22T19:23:33,012 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=122, memsize=53.7 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/159ad8fa3986436aa0db8d2e65be0809 2024-11-22T19:23:33,023 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/cb7a2ad1e1324a8d892c539506ac3592 is 50, key is test_row_0/B:col10/1732303411794/Put/seqid=0 2024-11-22T19:23:33,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:33,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303473051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:33,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:33,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303473052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:33,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:33,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303473053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:33,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742295_1471 (size=12001) 2024-11-22T19:23:33,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=122 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/cb7a2ad1e1324a8d892c539506ac3592 2024-11-22T19:23:33,062 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:33,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303473059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:33,068 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/5eb6b57f3f7248f8a79f5cb64e368f75 is 50, key is test_row_0/C:col10/1732303411794/Put/seqid=0 2024-11-22T19:23:33,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742296_1472 (size=12001) 2024-11-22T19:23:33,114 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/9a63d66dd9784fba80698bed604ae981 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9a63d66dd9784fba80698bed604ae981 2024-11-22T19:23:33,119 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 842a4f99d1015855e3e2b86470e8d61b/B of 842a4f99d1015855e3e2b86470e8d61b into 9a63d66dd9784fba80698bed604ae981(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:23:33,119 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:33,119 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., storeName=842a4f99d1015855e3e2b86470e8d61b/B, priority=13, startTime=1732303412655; duration=0sec 2024-11-22T19:23:33,119 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:33,119 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 842a4f99d1015855e3e2b86470e8d61b:B 2024-11-22T19:23:33,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-22T19:23:33,209 INFO [Thread-1947 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-11-22T19:23:33,212 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:23:33,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-11-22T19:23:33,215 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:23:33,215 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:23:33,215 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:23:33,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-22T19:23:33,259 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:33,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303473257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:33,260 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:33,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303473258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:33,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:33,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303473258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:33,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:33,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303473264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:33,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-22T19:23:33,367 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:33,368 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-22T19:23:33,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:33,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:33,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:33,368 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
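Note on the failures recorded above: the RegionTooBusyException warnings show the region server rejecting Mutate calls while the memstore of 842a4f99d1015855e3e2b86470e8d61b is over its 512.0 K blocking limit and a flush of that region is still in progress (the pid=113 FlushRegionCallable attempts fail with "already flushing"); on the client side the blocking caller keeps retrying, as the "Call exception, tries=6, retries=16" DEBUG entry further below shows. The following Java fragment is a rough sketch only, not part of this test run, of how client code could tune and react to that behaviour: the column family "A" and the row "test_row_1" come from the log, while the qualifier, value, and pause settings are illustrative assumptions layered on the standard hbase.client.* configuration keys.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionTooBusyRetrySketch {
  public static void main(String[] args) throws IOException, InterruptedException {
    Configuration conf = HBaseConfiguration.create();
    // Standard client retry knobs; 16 mirrors the "retries=16" visible in the log,
    // the 100 ms base pause is an arbitrary illustrative choice.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row taken from the log; family "A" exists in this table, qualifier/value are made up.
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("q0"), Bytes.toBytes("value"));
      try {
        // The blocking client retries internally, so a put against a region that is
        // over its memstore blocking limit normally just waits out the flush rather
        // than failing immediately.
        table.put(put);
      } catch (IOException e) {
        // Only reached once all retries are exhausted; the cause is typically the
        // server-side RegionTooBusyException ("Over memstore limit=512.0 K").
        // Back off briefly and try once more after the flush has had time to run.
        Thread.sleep(1000L);
        table.put(put);
      }
    }
  }
}

In this run the writer threads rely on the built-in retries alone, which is consistent with the repeated "Call exception" DEBUG entries below rather than test failures.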
2024-11-22T19:23:33,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:33,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:33,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:33,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303473457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:33,461 DEBUG [Thread-1939 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4180 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., hostname=a307a1377457,35917,1732303314657, seqNum=5, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:23:33,510 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=122 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/5eb6b57f3f7248f8a79f5cb64e368f75 2024-11-22T19:23:33,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-22T19:23:33,520 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/159ad8fa3986436aa0db8d2e65be0809 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/159ad8fa3986436aa0db8d2e65be0809 2024-11-22T19:23:33,521 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:33,521 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-22T19:23:33,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:33,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:33,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:33,521 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:33,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:33,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:33,532 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/159ad8fa3986436aa0db8d2e65be0809, entries=200, sequenceid=122, filesize=38.6 K 2024-11-22T19:23:33,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/cb7a2ad1e1324a8d892c539506ac3592 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/cb7a2ad1e1324a8d892c539506ac3592 2024-11-22T19:23:33,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/cb7a2ad1e1324a8d892c539506ac3592, entries=150, sequenceid=122, filesize=11.7 K 2024-11-22T19:23:33,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/5eb6b57f3f7248f8a79f5cb64e368f75 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/5eb6b57f3f7248f8a79f5cb64e368f75 2024-11-22T19:23:33,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,546 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/5eb6b57f3f7248f8a79f5cb64e368f75, entries=150, sequenceid=122, filesize=11.7 K 2024-11-22T19:23:33,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,547 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 842a4f99d1015855e3e2b86470e8d61b in 615ms, sequenceid=122, compaction requested=false 2024-11-22T19:23:33,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,547 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:33,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-22T19:23:33,589 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 842a4f99d1015855e3e2b86470e8d61b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-11-22T19:23:33,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=A
2024-11-22T19:23:33,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T19:23:33,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=B
2024-11-22T19:23:33,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T19:23:33,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=C
2024-11-22T19:23:33,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T19:23:33,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 842a4f99d1015855e3e2b86470e8d61b
2024-11-22T19:23:33,615 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112268248b355beb499a8cd2a72744a49e26_842a4f99d1015855e3e2b86470e8d61b is 50, key is test_row_0/A:col10/1732303413588/Put/seqid=0
2024-11-22T19:23:33,618 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:33,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:33,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303473647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:33,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:33,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303473653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:33,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:33,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303473654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:33,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742297_1473 (size=14794) 2024-11-22T19:23:33,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:33,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303473654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:33,676 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:33,677 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-22T19:23:33,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:33,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:33,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:33,677 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:33,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:33,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:33,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:33,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303473756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:33,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:33,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303473758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:33,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:33,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303473759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:33,768 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:33,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303473764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:33,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-22T19:23:33,831 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:33,831 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-22T19:23:33,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:33,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:33,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:33,832 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:33,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:33,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:33,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:33,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303473960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:33,968 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:33,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303473964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:33,968 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:33,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303473965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:33,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:33,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303473970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:33,985 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:33,985 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-22T19:23:33,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:33,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:33,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:33,986 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:33,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:33,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:34,063 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:34,066 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112268248b355beb499a8cd2a72744a49e26_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112268248b355beb499a8cd2a72744a49e26_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:34,067 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/5eb950d6265a49d8a04b0b4f1180d717, store: [table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:34,068 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/5eb950d6265a49d8a04b0b4f1180d717 is 175, key is test_row_0/A:col10/1732303413588/Put/seqid=0 2024-11-22T19:23:34,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742298_1474 (size=39745) 2024-11-22T19:23:34,138 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:34,138 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-22T19:23:34,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:34,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:34,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
2024-11-22T19:23:34,139 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:34,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:34,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:34,145 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-22T19:23:34,145 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-22T19:23:34,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:34,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303474264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:34,271 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:34,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303474269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:34,271 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:34,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303474269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:34,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:34,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303474278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:34,291 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:34,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-22T19:23:34,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:34,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:34,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:34,292 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:23:34,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:34,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:34,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-22T19:23:34,444 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:34,444 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-22T19:23:34,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:34,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:34,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:34,445 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:34,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:34,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:23:34,473 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=134, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/5eb950d6265a49d8a04b0b4f1180d717 2024-11-22T19:23:34,480 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/cc7a56d9efe54bd282f524e7cb1675d7 is 50, key is test_row_0/B:col10/1732303413588/Put/seqid=0 2024-11-22T19:23:34,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742299_1475 (size=9757) 2024-11-22T19:23:34,597 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:34,597 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-22T19:23:34,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:34,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:34,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:34,598 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:23:34,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:34,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:34,750 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:34,750 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-22T19:23:34,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:34,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:34,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:34,750 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:34,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:34,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:34,769 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:34,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303474767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:34,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:34,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303474773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:34,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:34,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303474774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:34,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:34,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303474784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:34,884 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/cc7a56d9efe54bd282f524e7cb1675d7 2024-11-22T19:23:34,891 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/e98aa01836c843f98b1cc8775580ff59 is 50, key is test_row_0/C:col10/1732303413588/Put/seqid=0 2024-11-22T19:23:34,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742300_1476 (size=9757) 2024-11-22T19:23:34,903 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:34,903 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-22T19:23:34,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:34,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:34,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:34,904 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:34,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:34,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:35,056 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:35,057 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-22T19:23:35,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:35,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:35,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:35,057 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:35,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:35,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:35,209 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:35,210 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-22T19:23:35,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:35,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:35,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:35,210 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:35,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:35,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:35,299 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/e98aa01836c843f98b1cc8775580ff59 2024-11-22T19:23:35,304 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/5eb950d6265a49d8a04b0b4f1180d717 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/5eb950d6265a49d8a04b0b4f1180d717 2024-11-22T19:23:35,308 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/5eb950d6265a49d8a04b0b4f1180d717, entries=200, sequenceid=134, filesize=38.8 K 2024-11-22T19:23:35,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/cc7a56d9efe54bd282f524e7cb1675d7 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/cc7a56d9efe54bd282f524e7cb1675d7 2024-11-22T19:23:35,318 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/cc7a56d9efe54bd282f524e7cb1675d7, entries=100, 
sequenceid=134, filesize=9.5 K 2024-11-22T19:23:35,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/e98aa01836c843f98b1cc8775580ff59 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/e98aa01836c843f98b1cc8775580ff59 2024-11-22T19:23:35,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-22T19:23:35,323 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/e98aa01836c843f98b1cc8775580ff59, entries=100, sequenceid=134, filesize=9.5 K 2024-11-22T19:23:35,324 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 842a4f99d1015855e3e2b86470e8d61b in 1735ms, sequenceid=134, compaction requested=true 2024-11-22T19:23:35,324 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:35,325 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:35,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 842a4f99d1015855e3e2b86470e8d61b:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:23:35,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:35,325 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110455 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:35,325 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:35,325 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 842a4f99d1015855e3e2b86470e8d61b/A is initiating minor compaction (all files) 2024-11-22T19:23:35,326 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 842a4f99d1015855e3e2b86470e8d61b/A in TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
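The shortCompactions entries above show the exploring compaction policy selecting all 3 eligible A-store files (110455 bytes in total) for a minor compaction. As a rough, self-contained sketch of the ratio rule such a policy applies (a simplification for illustration, not the actual ExploringCompactionPolicy source; the 1.2 ratio is an assumed default), a candidate set is acceptable only if no single file is larger than the ratio times the combined size of the other files:

import java.util.List;

// Simplified illustration of the "ratio" check applied when exploring
// candidate sets of store files for a minor compaction.
public class CompactionRatioSketch {

    // A candidate set passes when no single file dwarfs the rest:
    // fileSize <= ratio * (sum of the sizes of the other files).
    static boolean withinRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes approximating the three A-store files from the log
        // (roughly 30.4 K, 38.6 K and 38.8 K, ~110455 bytes in total).
        List<Long> candidate = List.of(31_130L, 39_526L, 39_799L);
        System.out.println("select for compaction: " + withinRatio(candidate, 1.2));
    }
}

With the file sizes seen in the log the check passes, which is consistent with the "selected 3 files ... after considering 1 permutations with 1 in ratio" entry.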
2024-11-22T19:23:35,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 842a4f99d1015855e3e2b86470e8d61b:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:23:35,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:35,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 842a4f99d1015855e3e2b86470e8d61b:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:23:35,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:35,326 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/463379ff8cf44ef69b8dd0cc80dc9073, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/159ad8fa3986436aa0db8d2e65be0809, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/5eb950d6265a49d8a04b0b4f1180d717] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp, totalSize=107.9 K 2024-11-22T19:23:35,326 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:35,326 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
files: [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/463379ff8cf44ef69b8dd0cc80dc9073, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/159ad8fa3986436aa0db8d2e65be0809, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/5eb950d6265a49d8a04b0b4f1180d717] 2024-11-22T19:23:35,326 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 463379ff8cf44ef69b8dd0cc80dc9073, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732303411433 2024-11-22T19:23:35,326 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 159ad8fa3986436aa0db8d2e65be0809, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1732303411794 2024-11-22T19:23:35,327 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:35,327 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 842a4f99d1015855e3e2b86470e8d61b/B is initiating minor compaction (all files) 2024-11-22T19:23:35,327 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 842a4f99d1015855e3e2b86470e8d61b/B in TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:35,327 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9a63d66dd9784fba80698bed604ae981, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/cb7a2ad1e1324a8d892c539506ac3592, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/cc7a56d9efe54bd282f524e7cb1675d7] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp, totalSize=33.2 K 2024-11-22T19:23:35,327 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5eb950d6265a49d8a04b0b4f1180d717, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732303412946 2024-11-22T19:23:35,327 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a63d66dd9784fba80698bed604ae981, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732303411433 2024-11-22T19:23:35,328 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting cb7a2ad1e1324a8d892c539506ac3592, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1732303411794 2024-11-22T19:23:35,328 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 
cc7a56d9efe54bd282f524e7cb1675d7, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732303412946 2024-11-22T19:23:35,343 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 842a4f99d1015855e3e2b86470e8d61b#B#compaction#410 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:35,343 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/9950be96f8f348228b9f068046474350 is 50, key is test_row_0/B:col10/1732303413588/Put/seqid=0 2024-11-22T19:23:35,348 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:35,357 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122932084f1e6de4467b43ee79f01c33cee_842a4f99d1015855e3e2b86470e8d61b store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:35,359 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122932084f1e6de4467b43ee79f01c33cee_842a4f99d1015855e3e2b86470e8d61b, store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:35,359 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122932084f1e6de4467b43ee79f01c33cee_842a4f99d1015855e3e2b86470e8d61b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:35,362 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:35,363 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-22T19:23:35,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
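The MOB entries above show a temporary writer being created for the A-store compaction and then aborted because no MOB cells were written to it (size=0 mobCells=0). A minimal sketch of that commit-or-abort pattern for a temporary file, using plain Hadoop FileSystem calls (an illustration only, not the DefaultMobStoreCompactor code; the paths and the cell counter are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TempFileCommitSketch {

    // Either promote the temp file into the store directory or delete it,
    // depending on whether any cells were actually written to it.
    static void commitOrAbort(FileSystem fs, Path tmpFile, Path storeDir,
                              long cellsWritten) throws java.io.IOException {
        if (cellsWritten == 0) {
            // Nothing useful in the file: abort, mirroring the
            // "Aborting writer ... because there are no MOB cells" entry.
            fs.delete(tmpFile, false);
            return;
        }
        // Otherwise make the file visible via a rename into the store directory.
        fs.rename(tmpFile, new Path(storeDir, tmpFile.getName()));
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf); // local FS is enough for the sketch
        Path tmp = new Path("/tmp/mob-writer-sketch/.tmp/part-0");
        Path store = new Path("/tmp/mob-writer-sketch/store");
        fs.mkdirs(tmp.getParent());
        fs.mkdirs(store);
        fs.create(tmp).close();
        commitOrAbort(fs, tmp, store, 0L); // 0 cells written -> temp file is deleted
    }
}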
2024-11-22T19:23:35,364 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 842a4f99d1015855e3e2b86470e8d61b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T19:23:35,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=A 2024-11-22T19:23:35,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:35,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=B 2024-11-22T19:23:35,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:35,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=C 2024-11-22T19:23:35,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:35,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742301_1477 (size=12409) 2024-11-22T19:23:35,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742302_1478 (size=4469) 2024-11-22T19:23:35,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122a4b8f03535a545dcae7cd67d70cc655b_842a4f99d1015855e3e2b86470e8d61b is 50, key is test_row_0/A:col10/1732303413646/Put/seqid=0 2024-11-22T19:23:35,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742303_1479 (size=12304) 2024-11-22T19:23:35,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:35,435 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122a4b8f03535a545dcae7cd67d70cc655b_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a4b8f03535a545dcae7cd67d70cc655b_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:35,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/e4379ea5b7884450b033c0127ed6a5f6, store: [table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:35,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/e4379ea5b7884450b033c0127ed6a5f6 is 175, key is test_row_0/A:col10/1732303413646/Put/seqid=0 2024-11-22T19:23:35,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742304_1480 (size=31105) 2024-11-22T19:23:35,462 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=159, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/e4379ea5b7884450b033c0127ed6a5f6 2024-11-22T19:23:35,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/c7f41e37e9f24787b494e3c7581600c8 is 50, key is test_row_0/B:col10/1732303413646/Put/seqid=0 2024-11-22T19:23:35,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742305_1481 (size=12151) 2024-11-22T19:23:35,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:35,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:35,789 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/9950be96f8f348228b9f068046474350 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9950be96f8f348228b9f068046474350 2024-11-22T19:23:35,793 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 842a4f99d1015855e3e2b86470e8d61b/B of 842a4f99d1015855e3e2b86470e8d61b into 9950be96f8f348228b9f068046474350(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
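Several entries in this stretch ("NOT flushing ... as already flushing", followed by the pid=113 flush finally running and writing files at sequenceid=159) reflect a per-region rule that only one flush runs at a time; rejected requests are retried later by the master's flush procedure. A minimal sketch of that guard, assuming a simple atomic flag (an illustration of the pattern, not HRegion's actual implementation):

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

public class SingleFlushGuardSketch {

    private final AtomicBoolean flushing = new AtomicBoolean(false);

    // Reject a flush request while another flush of the same region is running;
    // the caller is expected to retry later, as the master does for pid=113 above.
    void flushIfIdle(String regionName) throws IOException {
        if (!flushing.compareAndSet(false, true)) {
            throw new IOException("Unable to complete flush for region " + regionName);
        }
        try {
            // ... snapshot the memstore and write temporary HFiles here ...
        } finally {
            flushing.set(false);
        }
    }

    public static void main(String[] args) throws IOException {
        SingleFlushGuardSketch guard = new SingleFlushGuardSketch();
        guard.flushIfIdle("842a4f99d1015855e3e2b86470e8d61b"); // idle, so this request proceeds
    }
}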
2024-11-22T19:23:35,793 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:35,793 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., storeName=842a4f99d1015855e3e2b86470e8d61b/B, priority=13, startTime=1732303415325; duration=0sec 2024-11-22T19:23:35,794 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:35,794 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 842a4f99d1015855e3e2b86470e8d61b:B 2024-11-22T19:23:35,794 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:35,795 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:35,795 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 842a4f99d1015855e3e2b86470e8d61b/C is initiating minor compaction (all files) 2024-11-22T19:23:35,795 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 842a4f99d1015855e3e2b86470e8d61b/C in TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:35,795 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/4143f7a0f8374557a551d5f538481962, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/5eb6b57f3f7248f8a79f5cb64e368f75, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/e98aa01836c843f98b1cc8775580ff59] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp, totalSize=33.2 K 2024-11-22T19:23:35,796 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 4143f7a0f8374557a551d5f538481962, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732303411433 2024-11-22T19:23:35,796 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 5eb6b57f3f7248f8a79f5cb64e368f75, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1732303411794 2024-11-22T19:23:35,796 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting e98aa01836c843f98b1cc8775580ff59, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732303412946 2024-11-22T19:23:35,802 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
842a4f99d1015855e3e2b86470e8d61b#C#compaction#414 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:35,803 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/04d870671cb5444fa227dedd96b71804 is 50, key is test_row_0/C:col10/1732303413588/Put/seqid=0 2024-11-22T19:23:35,803 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 842a4f99d1015855e3e2b86470e8d61b#A#compaction#411 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:35,804 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/8936f744160d46afaaf2935a7e12794e is 175, key is test_row_0/A:col10/1732303413588/Put/seqid=0 2024-11-22T19:23:35,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:35,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303475797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:35,805 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:35,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303475798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:35,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742306_1482 (size=12409) 2024-11-22T19:23:35,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742307_1483 (size=31470) 2024-11-22T19:23:35,812 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/04d870671cb5444fa227dedd96b71804 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/04d870671cb5444fa227dedd96b71804 2024-11-22T19:23:35,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:35,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303475805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:35,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:35,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303475806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:35,816 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 842a4f99d1015855e3e2b86470e8d61b/C of 842a4f99d1015855e3e2b86470e8d61b into 04d870671cb5444fa227dedd96b71804(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
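The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the region server pushing back on writers while flushes and compactions catch up. A hedged client-side sketch of retrying a put with exponential backoff when that pushback reaches the application (whether the exception surfaces directly or wrapped depends on the client's own retry settings, so the sketch checks both; the table, row and column names are taken from the log):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffOnBusyRegion {

    // Retry a put with exponential backoff while the region rejects writes
    // because its memstore is over the blocking limit.
    static void putWithBackoff(Table table, Put put, int maxRetries)
            throws IOException, InterruptedException {
        long sleepMs = 100;
        for (int attempt = 0; ; attempt++) {
            try {
                table.put(put);
                return;
            } catch (IOException e) {
                boolean regionBusy = e instanceof RegionTooBusyException
                        || e.getCause() instanceof RegionTooBusyException;
                if (!regionBusy || attempt >= maxRetries) {
                    throw e;
                }
                Thread.sleep(sleepMs);                  // let flush/compaction catch up
                sleepMs = Math.min(sleepMs * 2, 5_000); // cap the backoff
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
            putWithBackoff(table, put, 10);
        }
    }
}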
2024-11-22T19:23:35,816 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:35,816 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., storeName=842a4f99d1015855e3e2b86470e8d61b/C, priority=13, startTime=1732303415326; duration=0sec 2024-11-22T19:23:35,816 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:35,816 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 842a4f99d1015855e3e2b86470e8d61b:C 2024-11-22T19:23:35,881 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/c7f41e37e9f24787b494e3c7581600c8 2024-11-22T19:23:35,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/c943090950b7446cbf7f6c40645af3b7 is 50, key is test_row_0/C:col10/1732303413646/Put/seqid=0 2024-11-22T19:23:35,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742308_1484 (size=12151) 2024-11-22T19:23:35,892 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/c943090950b7446cbf7f6c40645af3b7 2024-11-22T19:23:35,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/e4379ea5b7884450b033c0127ed6a5f6 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/e4379ea5b7884450b033c0127ed6a5f6 2024-11-22T19:23:35,898 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/e4379ea5b7884450b033c0127ed6a5f6, entries=150, sequenceid=159, filesize=30.4 K 2024-11-22T19:23:35,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/c7f41e37e9f24787b494e3c7581600c8 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/c7f41e37e9f24787b494e3c7581600c8 2024-11-22T19:23:35,903 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/c7f41e37e9f24787b494e3c7581600c8, entries=150, sequenceid=159, filesize=11.9 K 2024-11-22T19:23:35,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/c943090950b7446cbf7f6c40645af3b7 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/c943090950b7446cbf7f6c40645af3b7 2024-11-22T19:23:35,908 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/c943090950b7446cbf7f6c40645af3b7, entries=150, sequenceid=159, filesize=11.9 K 2024-11-22T19:23:35,909 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 842a4f99d1015855e3e2b86470e8d61b in 545ms, sequenceid=159, compaction requested=false 2024-11-22T19:23:35,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:35,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
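The flush that completes here drains roughly 147.60 KB, yet puts keep being rejected at the 512.0 K mark. In HBase the per-region blocking threshold is the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier, so the 512.0 K limit presumably comes from a deliberately small flush size in the test configuration (128 KB times the default multiplier of 4 would match). A sketch of the two settings involved; the keys are standard HBase configuration properties, while the values are illustrative rather than read from this test harness:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitExample {
        public static Configuration smallMemstoreConf() {
            Configuration conf = HBaseConfiguration.create();
            // Per-region memstore size that triggers a flush (default 128 MB; tests shrink it).
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 KB, hypothetical
            // Puts are rejected with RegionTooBusyException once the memstore exceeds
            // flush.size * block.multiplier -- 128 KB * 4 = 512 KB, matching the limit in the log.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            return conf;
        }
    }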
2024-11-22T19:23:35,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-11-22T19:23:35,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-11-22T19:23:35,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:35,911 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 842a4f99d1015855e3e2b86470e8d61b 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-22T19:23:35,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=A 2024-11-22T19:23:35,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:35,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=B 2024-11-22T19:23:35,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:35,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=C 2024-11-22T19:23:35,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:35,912 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-22T19:23:35,912 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6950 sec 2024-11-22T19:23:35,913 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 2.7000 sec 2024-11-22T19:23:35,919 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411223dfaa1867ff847bc8c10f6a3da117867_842a4f99d1015855e3e2b86470e8d61b is 50, key is test_row_0/A:col10/1732303415796/Put/seqid=0 2024-11-22T19:23:35,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742309_1485 (size=14794) 2024-11-22T19:23:35,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:35,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303475952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:35,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:35,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303475953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:35,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:35,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303475954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:36,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:36,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303476059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:36,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:36,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303476059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:36,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:36,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303476059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:36,213 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/8936f744160d46afaaf2935a7e12794e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/8936f744160d46afaaf2935a7e12794e 2024-11-22T19:23:36,216 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 842a4f99d1015855e3e2b86470e8d61b/A of 842a4f99d1015855e3e2b86470e8d61b into 8936f744160d46afaaf2935a7e12794e(size=30.7 K), total size for store is 61.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:23:36,217 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:36,217 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., storeName=842a4f99d1015855e3e2b86470e8d61b/A, priority=13, startTime=1732303415324; duration=0sec 2024-11-22T19:23:36,217 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:36,217 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 842a4f99d1015855e3e2b86470e8d61b:A 2024-11-22T19:23:36,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:36,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303476262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:36,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:36,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303476262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:36,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:36,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303476263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:36,324 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:36,327 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411223dfaa1867ff847bc8c10f6a3da117867_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411223dfaa1867ff847bc8c10f6a3da117867_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:36,328 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/45f8b840345842eeb5c67c910ff1bb49, store: [table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:36,328 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/45f8b840345842eeb5c67c910ff1bb49 is 175, key is test_row_0/A:col10/1732303415796/Put/seqid=0 2024-11-22T19:23:36,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742310_1486 (size=39749) 2024-11-22T19:23:36,570 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:36,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303476567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:36,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:36,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303476567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:36,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:36,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303476568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:36,732 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=174, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/45f8b840345842eeb5c67c910ff1bb49 2024-11-22T19:23:36,739 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/9592da46d5fd44cfa5f7b3fc9e5aa707 is 50, key is test_row_0/B:col10/1732303415796/Put/seqid=0 2024-11-22T19:23:36,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742311_1487 (size=12151) 2024-11-22T19:23:36,742 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/9592da46d5fd44cfa5f7b3fc9e5aa707 2024-11-22T19:23:36,749 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/b2bbcc92b3174c57b4bb95b966b09607 is 50, key is test_row_0/C:col10/1732303415796/Put/seqid=0 2024-11-22T19:23:36,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742312_1488 (size=12151) 2024-11-22T19:23:37,077 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:37,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303477073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:37,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:37,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303477074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:37,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:37,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303477075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:37,154 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/b2bbcc92b3174c57b4bb95b966b09607 2024-11-22T19:23:37,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/45f8b840345842eeb5c67c910ff1bb49 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/45f8b840345842eeb5c67c910ff1bb49 2024-11-22T19:23:37,172 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/45f8b840345842eeb5c67c910ff1bb49, entries=200, sequenceid=174, filesize=38.8 K 2024-11-22T19:23:37,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/9592da46d5fd44cfa5f7b3fc9e5aa707 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9592da46d5fd44cfa5f7b3fc9e5aa707 2024-11-22T19:23:37,187 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9592da46d5fd44cfa5f7b3fc9e5aa707, entries=150, sequenceid=174, filesize=11.9 K 2024-11-22T19:23:37,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/b2bbcc92b3174c57b4bb95b966b09607 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/b2bbcc92b3174c57b4bb95b966b09607 2024-11-22T19:23:37,192 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/b2bbcc92b3174c57b4bb95b966b09607, entries=150, sequenceid=174, filesize=11.9 K 2024-11-22T19:23:37,193 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 842a4f99d1015855e3e2b86470e8d61b in 1282ms, sequenceid=174, compaction requested=true 2024-11-22T19:23:37,193 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:37,193 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:37,194 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102324 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:37,194 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 842a4f99d1015855e3e2b86470e8d61b/A is initiating minor compaction (all files) 2024-11-22T19:23:37,194 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 842a4f99d1015855e3e2b86470e8d61b/A in TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:37,194 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/8936f744160d46afaaf2935a7e12794e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/e4379ea5b7884450b033c0127ed6a5f6, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/45f8b840345842eeb5c67c910ff1bb49] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp, totalSize=99.9 K 2024-11-22T19:23:37,194 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:37,194 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
files: [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/8936f744160d46afaaf2935a7e12794e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/e4379ea5b7884450b033c0127ed6a5f6, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/45f8b840345842eeb5c67c910ff1bb49] 2024-11-22T19:23:37,195 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 842a4f99d1015855e3e2b86470e8d61b:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:23:37,195 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:37,195 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8936f744160d46afaaf2935a7e12794e, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732303412930 2024-11-22T19:23:37,195 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 842a4f99d1015855e3e2b86470e8d61b:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:23:37,195 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:37,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:37,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 842a4f99d1015855e3e2b86470e8d61b:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:23:37,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:37,196 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e4379ea5b7884450b033c0127ed6a5f6, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732303413646 2024-11-22T19:23:37,196 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45f8b840345842eeb5c67c910ff1bb49, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732303415793 2024-11-22T19:23:37,196 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:37,196 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 842a4f99d1015855e3e2b86470e8d61b/B is initiating minor compaction (all files) 2024-11-22T19:23:37,196 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 842a4f99d1015855e3e2b86470e8d61b/B in TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
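The SortedCompactionPolicy/ExploringCompactionPolicy lines here show minor compactions being selected for stores A and B: three eligible files, none currently compacting, and a blocking threshold of 16 store files. Selection behaviour of this kind is governed by a few store-level settings; a sketch using the stock configuration keys (the values shown are the usual defaults, given for illustration rather than read from this test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionExample {
        public static Configuration compactionConf() {
            Configuration conf = HBaseConfiguration.create();
            // Minimum and maximum number of store files considered per minor compaction.
            conf.setInt("hbase.hstore.compaction.min", 3);
            conf.setInt("hbase.hstore.compaction.max", 10);
            // Roughly: a file joins a selection only if its size is within ratio * (size of the
            // other candidates); the ExploringCompactionPolicy weighs permutations against this.
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
            // Writes block once a store accumulates this many files (the "16 blocking" in the log).
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);
            return conf;
        }
    }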
2024-11-22T19:23:37,197 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9950be96f8f348228b9f068046474350, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/c7f41e37e9f24787b494e3c7581600c8, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9592da46d5fd44cfa5f7b3fc9e5aa707] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp, totalSize=35.9 K 2024-11-22T19:23:37,197 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 9950be96f8f348228b9f068046474350, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732303412930 2024-11-22T19:23:37,198 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting c7f41e37e9f24787b494e3c7581600c8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732303413646 2024-11-22T19:23:37,199 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 9592da46d5fd44cfa5f7b3fc9e5aa707, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732303415793 2024-11-22T19:23:37,203 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:37,206 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122eb245776e963445ea33afe9a9fba3fcd_842a4f99d1015855e3e2b86470e8d61b store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:37,208 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122eb245776e963445ea33afe9a9fba3fcd_842a4f99d1015855e3e2b86470e8d61b, store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:37,208 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122eb245776e963445ea33afe9a9fba3fcd_842a4f99d1015855e3e2b86470e8d61b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:37,216 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 842a4f99d1015855e3e2b86470e8d61b#B#compaction#420 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:37,216 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/2c58367b66f74f82ac82198022868ac9 is 50, key is test_row_0/B:col10/1732303415796/Put/seqid=0 2024-11-22T19:23:37,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742313_1489 (size=4469) 2024-11-22T19:23:37,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742314_1490 (size=12561) 2024-11-22T19:23:37,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-22T19:23:37,323 INFO [Thread-1947 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-11-22T19:23:37,326 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:23:37,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-11-22T19:23:37,327 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:23:37,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-22T19:23:37,328 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:23:37,328 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:23:37,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-22T19:23:37,480 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:37,481 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-22T19:23:37,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
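The pid=114 FlushTableProcedure above is the master-side half of an explicit flush requested by the test client (Client=jenkins flush TestAcidGuarantees): the master stores the procedure and dispatches a FlushRegionProcedure (pid=115) to the region server, whose execution continues below. A minimal sketch of issuing such a flush from client code, assuming the standard Admin API (connection setup and class name are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Matches the "flush TestAcidGuarantees" request in the log: the master
                // stores a FlushTableProcedure and fans out FlushRegionProcedures.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }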
2024-11-22T19:23:37,481 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 842a4f99d1015855e3e2b86470e8d61b 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-22T19:23:37,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=A 2024-11-22T19:23:37,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:37,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=B 2024-11-22T19:23:37,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:37,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=C 2024-11-22T19:23:37,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:37,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:37,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:37,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122045a1cc2d58b4c85b9e87e65de85a574_842a4f99d1015855e3e2b86470e8d61b is 50, key is test_row_0/A:col10/1732303415953/Put/seqid=0 2024-11-22T19:23:37,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742315_1491 (size=12304) 2024-11-22T19:23:37,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:37,508 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122045a1cc2d58b4c85b9e87e65de85a574_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122045a1cc2d58b4c85b9e87e65de85a574_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:37,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/fe291f6fdc054618bec3d6fdb8614891, store: [table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:37,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/fe291f6fdc054618bec3d6fdb8614891 is 175, key is test_row_0/A:col10/1732303415953/Put/seqid=0 2024-11-22T19:23:37,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742316_1492 (size=31105) 2024-11-22T19:23:37,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:37,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303477551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:37,620 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 842a4f99d1015855e3e2b86470e8d61b#A#compaction#419 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:37,621 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/312b60af6eca401eb665075103dd0722 is 175, key is test_row_0/A:col10/1732303415796/Put/seqid=0 2024-11-22T19:23:37,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-22T19:23:37,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742317_1493 (size=31515) 2024-11-22T19:23:37,635 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/312b60af6eca401eb665075103dd0722 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/312b60af6eca401eb665075103dd0722 2024-11-22T19:23:37,641 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/2c58367b66f74f82ac82198022868ac9 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/2c58367b66f74f82ac82198022868ac9 2024-11-22T19:23:37,643 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 842a4f99d1015855e3e2b86470e8d61b/A of 842a4f99d1015855e3e2b86470e8d61b into 312b60af6eca401eb665075103dd0722(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:23:37,643 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:37,643 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., storeName=842a4f99d1015855e3e2b86470e8d61b/A, priority=13, startTime=1732303417193; duration=0sec 2024-11-22T19:23:37,643 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:37,643 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 842a4f99d1015855e3e2b86470e8d61b:A 2024-11-22T19:23:37,644 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:37,646 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 842a4f99d1015855e3e2b86470e8d61b/B of 842a4f99d1015855e3e2b86470e8d61b into 2c58367b66f74f82ac82198022868ac9(size=12.3 K), total size for store is 12.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:23:37,646 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:37,646 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:37,646 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., storeName=842a4f99d1015855e3e2b86470e8d61b/B, priority=13, startTime=1732303417195; duration=0sec 2024-11-22T19:23:37,646 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 842a4f99d1015855e3e2b86470e8d61b/C is initiating minor compaction (all files) 2024-11-22T19:23:37,646 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:37,646 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 842a4f99d1015855e3e2b86470e8d61b:B 2024-11-22T19:23:37,646 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 842a4f99d1015855e3e2b86470e8d61b/C in TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:37,646 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/04d870671cb5444fa227dedd96b71804, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/c943090950b7446cbf7f6c40645af3b7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/b2bbcc92b3174c57b4bb95b966b09607] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp, totalSize=35.9 K 2024-11-22T19:23:37,648 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 04d870671cb5444fa227dedd96b71804, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732303412930 2024-11-22T19:23:37,648 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting c943090950b7446cbf7f6c40645af3b7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732303413646 2024-11-22T19:23:37,648 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b2bbcc92b3174c57b4bb95b966b09607, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732303415793 2024-11-22T19:23:37,656 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 842a4f99d1015855e3e2b86470e8d61b#C#compaction#422 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:37,656 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/bea3dc7c75bf46ca9dc00afe0b0c98d2 is 50, key is test_row_0/C:col10/1732303415796/Put/seqid=0 2024-11-22T19:23:37,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:37,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303477661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:37,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742318_1494 (size=12561) 2024-11-22T19:23:37,819 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:37,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303477814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:37,820 DEBUG [Thread-1937 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4166 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., hostname=a307a1377457,35917,1732303314657, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:23:37,871 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:37,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303477867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:37,920 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=198, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/fe291f6fdc054618bec3d6fdb8614891 2024-11-22T19:23:37,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/131b88b53dfe41d6a3c41cfa4c0428e0 is 50, key is test_row_0/B:col10/1732303415953/Put/seqid=0 2024-11-22T19:23:37,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-22T19:23:37,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742319_1495 (size=12151) 2024-11-22T19:23:38,079 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/bea3dc7c75bf46ca9dc00afe0b0c98d2 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/bea3dc7c75bf46ca9dc00afe0b0c98d2 2024-11-22T19:23:38,083 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 842a4f99d1015855e3e2b86470e8d61b/C of 842a4f99d1015855e3e2b86470e8d61b into bea3dc7c75bf46ca9dc00afe0b0c98d2(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:23:38,083 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:38,083 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., storeName=842a4f99d1015855e3e2b86470e8d61b/C, priority=13, startTime=1732303417196; duration=0sec 2024-11-22T19:23:38,083 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:38,083 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 842a4f99d1015855e3e2b86470e8d61b:C 2024-11-22T19:23:38,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:38,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303478082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:38,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:38,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303478083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:38,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:38,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303478085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:38,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:38,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303478172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:38,351 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/131b88b53dfe41d6a3c41cfa4c0428e0 2024-11-22T19:23:38,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/38d61b3559e64b578dfab59711abf50e is 50, key is test_row_0/C:col10/1732303415953/Put/seqid=0 2024-11-22T19:23:38,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742320_1496 (size=12151) 2024-11-22T19:23:38,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-22T19:23:38,678 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:38,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303478676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:38,762 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/38d61b3559e64b578dfab59711abf50e 2024-11-22T19:23:38,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/fe291f6fdc054618bec3d6fdb8614891 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/fe291f6fdc054618bec3d6fdb8614891 2024-11-22T19:23:38,770 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/fe291f6fdc054618bec3d6fdb8614891, entries=150, sequenceid=198, filesize=30.4 K 2024-11-22T19:23:38,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/131b88b53dfe41d6a3c41cfa4c0428e0 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/131b88b53dfe41d6a3c41cfa4c0428e0 2024-11-22T19:23:38,774 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/131b88b53dfe41d6a3c41cfa4c0428e0, entries=150, sequenceid=198, filesize=11.9 K 2024-11-22T19:23:38,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/38d61b3559e64b578dfab59711abf50e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/38d61b3559e64b578dfab59711abf50e 2024-11-22T19:23:38,778 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/38d61b3559e64b578dfab59711abf50e, entries=150, sequenceid=198, filesize=11.9 K 2024-11-22T19:23:38,779 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 842a4f99d1015855e3e2b86470e8d61b in 1298ms, sequenceid=198, compaction requested=false 2024-11-22T19:23:38,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:38,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:38,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-11-22T19:23:38,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-11-22T19:23:38,781 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-11-22T19:23:38,781 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4520 sec 2024-11-22T19:23:38,783 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 1.4560 sec 2024-11-22T19:23:39,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-22T19:23:39,432 INFO [Thread-1947 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-22T19:23:39,433 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:23:39,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-11-22T19:23:39,435 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
2024-11-22T19:23:39,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-22T19:23:39,436 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:23:39,436 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:23:39,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-22T19:23:39,588 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:39,588 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-22T19:23:39,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:39,589 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 842a4f99d1015855e3e2b86470e8d61b 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-22T19:23:39,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=A 2024-11-22T19:23:39,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:39,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=B 2024-11-22T19:23:39,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:39,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=C 2024-11-22T19:23:39,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:39,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411228d0fe4c542b4445696ca7e0567eb655c_842a4f99d1015855e3e2b86470e8d61b is 50, key is test_row_0/A:col10/1732303417550/Put/seqid=0 2024-11-22T19:23:39,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:41091 is added to blk_1073742321_1497 (size=12304) 2024-11-22T19:23:39,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:39,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:39,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-22T19:23:39,793 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:39,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303479789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:39,898 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:39,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303479895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:40,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:40,005 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411228d0fe4c542b4445696ca7e0567eb655c_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411228d0fe4c542b4445696ca7e0567eb655c_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:40,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/5859095fa363426a93b6f87c273d0d23, store: [table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:40,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/5859095fa363426a93b6f87c273d0d23 is 175, key is test_row_0/A:col10/1732303417550/Put/seqid=0 2024-11-22T19:23:40,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742322_1498 (size=31105) 2024-11-22T19:23:40,011 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=214, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/5859095fa363426a93b6f87c273d0d23 2024-11-22T19:23:40,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/9e1645d7b563459bbca67b6b58083476 is 50, key is test_row_0/B:col10/1732303417550/Put/seqid=0 2024-11-22T19:23:40,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742323_1499 (size=12151) 2024-11-22T19:23:40,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-22T19:23:40,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:40,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303480094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:40,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:40,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303480096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:40,099 DEBUG [Thread-1945 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4145 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., hostname=a307a1377457,35917,1732303314657, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:23:40,099 DEBUG [Thread-1943 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4147 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., hostname=a307a1377457,35917,1732303314657, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:23:40,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:40,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303480100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:40,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:40,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303480104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:40,107 DEBUG [Thread-1941 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4154 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., hostname=a307a1377457,35917,1732303314657, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:23:40,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:40,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303480408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:40,422 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/9e1645d7b563459bbca67b6b58083476 2024-11-22T19:23:40,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/ffaccc568ecd4b94a31525c367c1fe54 is 50, key is test_row_0/C:col10/1732303417550/Put/seqid=0 2024-11-22T19:23:40,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742324_1500 (size=12151) 2024-11-22T19:23:40,438 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/ffaccc568ecd4b94a31525c367c1fe54 2024-11-22T19:23:40,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/5859095fa363426a93b6f87c273d0d23 as 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/5859095fa363426a93b6f87c273d0d23 2024-11-22T19:23:40,445 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/5859095fa363426a93b6f87c273d0d23, entries=150, sequenceid=214, filesize=30.4 K 2024-11-22T19:23:40,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/9e1645d7b563459bbca67b6b58083476 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9e1645d7b563459bbca67b6b58083476 2024-11-22T19:23:40,449 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9e1645d7b563459bbca67b6b58083476, entries=150, sequenceid=214, filesize=11.9 K 2024-11-22T19:23:40,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/ffaccc568ecd4b94a31525c367c1fe54 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/ffaccc568ecd4b94a31525c367c1fe54 2024-11-22T19:23:40,452 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/ffaccc568ecd4b94a31525c367c1fe54, entries=150, sequenceid=214, filesize=11.9 K 2024-11-22T19:23:40,453 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 842a4f99d1015855e3e2b86470e8d61b in 865ms, sequenceid=214, compaction requested=true 2024-11-22T19:23:40,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:40,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
2024-11-22T19:23:40,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-11-22T19:23:40,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-11-22T19:23:40,455 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-22T19:23:40,455 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0180 sec 2024-11-22T19:23:40,457 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 1.0230 sec 2024-11-22T19:23:40,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-22T19:23:40,538 INFO [Thread-1947 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-11-22T19:23:40,539 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:23:40,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-11-22T19:23:40,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-22T19:23:40,541 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:23:40,541 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:23:40,541 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:23:40,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-22T19:23:40,693 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:40,693 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-22T19:23:40,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
2024-11-22T19:23:40,694 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing 842a4f99d1015855e3e2b86470e8d61b 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-22T19:23:40,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=A 2024-11-22T19:23:40,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:40,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=B 2024-11-22T19:23:40,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:40,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=C 2024-11-22T19:23:40,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:40,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112286301cd036b547f1b20b05dee738d5b6_842a4f99d1015855e3e2b86470e8d61b is 50, key is test_row_0/A:col10/1732303419778/Put/seqid=0 2024-11-22T19:23:40,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742325_1501 (size=12304) 2024-11-22T19:23:40,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:40,713 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112286301cd036b547f1b20b05dee738d5b6_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112286301cd036b547f1b20b05dee738d5b6_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:40,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/71e9ee43914f48c2afbd96835075bafe, store: [table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:40,714 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/71e9ee43914f48c2afbd96835075bafe is 175, key is test_row_0/A:col10/1732303419778/Put/seqid=0 2024-11-22T19:23:40,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742326_1502 (size=31105) 2024-11-22T19:23:40,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-22T19:23:40,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:40,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:40,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:40,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303480964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:41,083 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:41,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303481077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:41,133 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=237, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/71e9ee43914f48c2afbd96835075bafe 2024-11-22T19:23:41,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-22T19:23:41,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/bb8ac63cb8b7432bb2292c01b4cebe46 is 50, key is test_row_0/B:col10/1732303419778/Put/seqid=0 2024-11-22T19:23:41,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742327_1503 (size=12151) 2024-11-22T19:23:41,173 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/bb8ac63cb8b7432bb2292c01b4cebe46 2024-11-22T19:23:41,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/8b83cd90028b48d299a021201b48b2fd is 50, key is test_row_0/C:col10/1732303419778/Put/seqid=0 2024-11-22T19:23:41,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to 
blk_1073742328_1504 (size=12151) 2024-11-22T19:23:41,291 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:41,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303481285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:41,590 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/8b83cd90028b48d299a021201b48b2fd 2024-11-22T19:23:41,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/71e9ee43914f48c2afbd96835075bafe as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/71e9ee43914f48c2afbd96835075bafe 2024-11-22T19:23:41,598 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/71e9ee43914f48c2afbd96835075bafe, entries=150, sequenceid=237, filesize=30.4 K 2024-11-22T19:23:41,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:41,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303481593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:41,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/bb8ac63cb8b7432bb2292c01b4cebe46 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/bb8ac63cb8b7432bb2292c01b4cebe46 2024-11-22T19:23:41,602 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/bb8ac63cb8b7432bb2292c01b4cebe46, entries=150, sequenceid=237, filesize=11.9 K 2024-11-22T19:23:41,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/8b83cd90028b48d299a021201b48b2fd as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/8b83cd90028b48d299a021201b48b2fd 2024-11-22T19:23:41,606 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/8b83cd90028b48d299a021201b48b2fd, entries=150, sequenceid=237, filesize=11.9 K 2024-11-22T19:23:41,607 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 842a4f99d1015855e3e2b86470e8d61b in 914ms, sequenceid=237, compaction requested=true 2024-11-22T19:23:41,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:41,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:41,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-11-22T19:23:41,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-11-22T19:23:41,610 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-22T19:23:41,610 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0680 sec 2024-11-22T19:23:41,611 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 1.0710 sec 2024-11-22T19:23:41,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-22T19:23:41,644 INFO [Thread-1947 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-11-22T19:23:41,645 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:23:41,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-11-22T19:23:41,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-22T19:23:41,647 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:23:41,647 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:23:41,647 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized 
subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:23:41,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-22T19:23:41,799 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:41,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-22T19:23:41,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:41,800 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing 842a4f99d1015855e3e2b86470e8d61b 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-22T19:23:41,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=A 2024-11-22T19:23:41,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:41,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=B 2024-11-22T19:23:41,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:41,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=C 2024-11-22T19:23:41,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:41,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411227a406c00ff2540a9b843bab7f1858bc0_842a4f99d1015855e3e2b86470e8d61b is 50, key is test_row_0/A:col10/1732303420959/Put/seqid=0 2024-11-22T19:23:41,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742329_1505 (size=12304) 2024-11-22T19:23:41,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:41,819 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411227a406c00ff2540a9b843bab7f1858bc0_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411227a406c00ff2540a9b843bab7f1858bc0_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:41,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/04c97a5553e04c4da5c171f6247395e5, store: [table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:41,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/04c97a5553e04c4da5c171f6247395e5 is 175, key is test_row_0/A:col10/1732303420959/Put/seqid=0 2024-11-22T19:23:41,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742330_1506 (size=31105) 2024-11-22T19:23:41,825 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=250, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/04c97a5553e04c4da5c171f6247395e5 2024-11-22T19:23:41,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:41,831 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:41,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/87d7688852894af9a8b460b8236ff3da is 50, key is test_row_0/B:col10/1732303420959/Put/seqid=0 2024-11-22T19:23:41,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742331_1507 (size=12151) 2024-11-22T19:23:41,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-22T19:23:41,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:41,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303481980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:42,089 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:42,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303482087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:42,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:42,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303482100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:42,239 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/87d7688852894af9a8b460b8236ff3da 2024-11-22T19:23:42,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-22T19:23:42,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/c33b35215b754821867c2485bf71be46 is 50, key is test_row_0/C:col10/1732303420959/Put/seqid=0 2024-11-22T19:23:42,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742332_1508 (size=12151) 2024-11-22T19:23:42,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:42,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303482291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:42,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:42,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303482597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:42,656 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/c33b35215b754821867c2485bf71be46 2024-11-22T19:23:42,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/04c97a5553e04c4da5c171f6247395e5 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/04c97a5553e04c4da5c171f6247395e5 2024-11-22T19:23:42,663 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/04c97a5553e04c4da5c171f6247395e5, entries=150, sequenceid=250, filesize=30.4 K 2024-11-22T19:23:42,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/87d7688852894af9a8b460b8236ff3da as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/87d7688852894af9a8b460b8236ff3da 2024-11-22T19:23:42,667 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/87d7688852894af9a8b460b8236ff3da, entries=150, sequenceid=250, filesize=11.9 K 2024-11-22T19:23:42,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/c33b35215b754821867c2485bf71be46 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/c33b35215b754821867c2485bf71be46 2024-11-22T19:23:42,671 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/c33b35215b754821867c2485bf71be46, entries=150, sequenceid=250, filesize=11.9 K 2024-11-22T19:23:42,672 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 842a4f99d1015855e3e2b86470e8d61b in 872ms, sequenceid=250, compaction requested=true 2024-11-22T19:23:42,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:42,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:42,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-11-22T19:23:42,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-11-22T19:23:42,674 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-11-22T19:23:42,674 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0260 sec 2024-11-22T19:23:42,675 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 1.0290 sec 2024-11-22T19:23:42,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-22T19:23:42,749 INFO [Thread-1947 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-11-22T19:23:42,750 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:23:42,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees 2024-11-22T19:23:42,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-22T19:23:42,752 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=122, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:23:42,752 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:23:42,753 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:23:42,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-22T19:23:42,904 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:42,904 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-22T19:23:42,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:42,905 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2837): Flushing 842a4f99d1015855e3e2b86470e8d61b 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-22T19:23:42,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=A 2024-11-22T19:23:42,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:42,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=B 2024-11-22T19:23:42,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:42,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=C 2024-11-22T19:23:42,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:42,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122c3b8930ce74e4be198d23a2fee5fe819_842a4f99d1015855e3e2b86470e8d61b is 50, key is test_row_0/A:col10/1732303421952/Put/seqid=0 2024-11-22T19:23:42,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:41091 is added to blk_1073742333_1509 (size=12454) 2024-11-22T19:23:43,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-22T19:23:43,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:43,108 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:43,135 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:43,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303483132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:43,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:43,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303483135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:43,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:43,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303483236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:43,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:43,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303483239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:43,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:43,319 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122c3b8930ce74e4be198d23a2fee5fe819_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122c3b8930ce74e4be198d23a2fee5fe819_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:43,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/33d0b9083b054d73925d92cbe76c6b66, store: [table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:43,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/33d0b9083b054d73925d92cbe76c6b66 is 175, key is test_row_0/A:col10/1732303421952/Put/seqid=0 2024-11-22T19:23:43,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742334_1510 (size=31255) 2024-11-22T19:23:43,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-22T19:23:43,449 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:43,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303483442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:43,450 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:43,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303483445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:43,725 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=273, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/33d0b9083b054d73925d92cbe76c6b66 2024-11-22T19:23:43,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/149f921fdee947b882d4fa438598188f is 50, key is test_row_0/B:col10/1732303421952/Put/seqid=0 2024-11-22T19:23:43,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742335_1511 (size=12301) 2024-11-22T19:23:43,756 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:43,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303483752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:43,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:43,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303483754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:43,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-22T19:23:44,119 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:44,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1732303484115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:44,120 DEBUG [Thread-1941 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8167 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., hostname=a307a1377457,35917,1732303314657, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:23:44,132 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:44,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41236 deadline: 1732303484125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:44,132 DEBUG [Thread-1945 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8178 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., hostname=a307a1377457,35917,1732303314657, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:23:44,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:44,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41222 deadline: 1732303484130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:44,134 DEBUG [Thread-1943 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8181 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., hostname=a307a1377457,35917,1732303314657, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:23:44,143 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/149f921fdee947b882d4fa438598188f 2024-11-22T19:23:44,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/470ea578d76147e29abaa504ffa066c1 is 50, key is test_row_0/C:col10/1732303421952/Put/seqid=0 2024-11-22T19:23:44,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742336_1512 (size=12301) 2024-11-22T19:23:44,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:44,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303484259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:44,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:44,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303484261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:44,561 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/470ea578d76147e29abaa504ffa066c1 2024-11-22T19:23:44,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/33d0b9083b054d73925d92cbe76c6b66 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/33d0b9083b054d73925d92cbe76c6b66 2024-11-22T19:23:44,568 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/33d0b9083b054d73925d92cbe76c6b66, entries=150, sequenceid=273, filesize=30.5 K 2024-11-22T19:23:44,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/149f921fdee947b882d4fa438598188f as 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/149f921fdee947b882d4fa438598188f 2024-11-22T19:23:44,572 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/149f921fdee947b882d4fa438598188f, entries=150, sequenceid=273, filesize=12.0 K 2024-11-22T19:23:44,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/470ea578d76147e29abaa504ffa066c1 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/470ea578d76147e29abaa504ffa066c1 2024-11-22T19:23:44,576 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/470ea578d76147e29abaa504ffa066c1, entries=150, sequenceid=273, filesize=12.0 K 2024-11-22T19:23:44,577 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 842a4f99d1015855e3e2b86470e8d61b in 1671ms, sequenceid=273, compaction requested=true 2024-11-22T19:23:44,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:44,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
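The repeated RegionTooBusyException entries above come from the region server rejecting puts while the region's memstore sits above its blocking limit (reported here as 512.0 K); the flush that just completed at sequenceid=273 is what eventually relieves that pressure and lets the retrying clients through. The sketch below is only an illustration of the shape of that guard, assuming the blocking limit is the configured flush size times a block multiplier; the class and parameter names are invented for the example and this is not the actual HRegion.checkResources code.

import java.io.IOException;

// Illustrative stand-in for the memstore guard that produces the
// "Over memstore limit" errors in this log. Not the real HBase code.
final class MemstoreGuardSketch {
    private final long blockingMemStoreSize;

    MemstoreGuardSketch(long flushSizeBytes, int blockMultiplier) {
        // Assumption for this sketch: blocking limit = flush size * block multiplier.
        // The test above clearly runs with a very small limit (512.0 K).
        this.blockingMemStoreSize = flushSizeBytes * blockMultiplier;
    }

    void checkResources(long currentMemStoreSizeBytes, String regionName) throws IOException {
        if (currentMemStoreSizeBytes > blockingMemStoreSize) {
            // The real server also requests a flush before throwing; the client's
            // retrying caller then backs off and retries (tries=7, retries=16 above).
            throw new IOException("Over memstore limit=" + blockingMemStoreSize
                + ", regionName=" + regionName);
        }
    }
}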
2024-11-22T19:23:44,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-11-22T19:23:44,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-11-22T19:23:44,579 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-11-22T19:23:44,579 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8250 sec 2024-11-22T19:23:44,580 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees in 1.8280 sec 2024-11-22T19:23:44,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-22T19:23:44,856 INFO [Thread-1947 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-11-22T19:23:44,857 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:23:44,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-11-22T19:23:44,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-22T19:23:44,858 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:23:44,859 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:23:44,859 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:23:44,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-22T19:23:45,011 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:45,011 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-22T19:23:45,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
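The entries above show a client-requested flush of TestAcidGuarantees turning into FlushTableProcedure pid=124 on the master, which then dispatches FlushRegionProcedure pid=125 to the region server. A minimal sketch of issuing such a flush through the standard HBase Admin API follows; the table name is taken from the log, and the rest is a generic example rather than what the test tool itself necessarily does.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Admin.flush asks the master to flush every region of the table; the
            // master runs it as a table-level procedure with per-region
            // subprocedures, which is what the pid=124/pid=125 entries correspond to.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}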
2024-11-22T19:23:45,011 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing 842a4f99d1015855e3e2b86470e8d61b 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-22T19:23:45,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=A 2024-11-22T19:23:45,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:45,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=B 2024-11-22T19:23:45,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:45,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=C 2024-11-22T19:23:45,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:45,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411222649e3cda78e406295f2042e16320328_842a4f99d1015855e3e2b86470e8d61b is 50, key is test_row_0/A:col10/1732303423131/Put/seqid=0 2024-11-22T19:23:45,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742337_1513 (size=12454) 2024-11-22T19:23:45,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-22T19:23:45,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:45,270 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:45,355 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:45,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303485344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:45,362 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:45,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303485356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:45,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:45,438 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411222649e3cda78e406295f2042e16320328_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411222649e3cda78e406295f2042e16320328_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:45,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/04f939d489884781a8a7ded414419001, store: [table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:45,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/04f939d489884781a8a7ded414419001 is 175, key is test_row_0/A:col10/1732303423131/Put/seqid=0 2024-11-22T19:23:45,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742338_1514 (size=31255) 2024-11-22T19:23:45,449 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=286, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/04f939d489884781a8a7ded414419001 2024-11-22T19:23:45,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/9d45a1522c1e41c9b4c8fde3510d73a8 is 50, key is test_row_0/B:col10/1732303423131/Put/seqid=0 2024-11-22T19:23:45,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-22T19:23:45,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742339_1515 (size=12301) 2024-11-22T19:23:45,462 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/9d45a1522c1e41c9b4c8fde3510d73a8 2024-11-22T19:23:45,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:45,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303485456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:45,466 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:45,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303485463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:45,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/fa8b0d986a07495787563c7b4667879b is 50, key is test_row_0/C:col10/1732303423131/Put/seqid=0 2024-11-22T19:23:45,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742340_1516 (size=12301) 2024-11-22T19:23:45,478 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/fa8b0d986a07495787563c7b4667879b 2024-11-22T19:23:45,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/04f939d489884781a8a7ded414419001 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/04f939d489884781a8a7ded414419001 2024-11-22T19:23:45,488 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/04f939d489884781a8a7ded414419001, entries=150, sequenceid=286, filesize=30.5 K 2024-11-22T19:23:45,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/9d45a1522c1e41c9b4c8fde3510d73a8 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9d45a1522c1e41c9b4c8fde3510d73a8 2024-11-22T19:23:45,492 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9d45a1522c1e41c9b4c8fde3510d73a8, entries=150, sequenceid=286, filesize=12.0 K 2024-11-22T19:23:45,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/fa8b0d986a07495787563c7b4667879b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/fa8b0d986a07495787563c7b4667879b 2024-11-22T19:23:45,497 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/fa8b0d986a07495787563c7b4667879b, entries=150, sequenceid=286, filesize=12.0 K 2024-11-22T19:23:45,498 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 842a4f99d1015855e3e2b86470e8d61b in 487ms, sequenceid=286, compaction requested=true 2024-11-22T19:23:45,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:45,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
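The client-side frames in the stack traces above (HTable.put invoked from AcidGuaranteesTestTool$AtomicityWriter) are ordinary Table.put calls that RpcRetryingCallerImpl re-attempts whenever the server answers with RegionTooBusyException. A minimal, self-contained writer in that spirit is sketched below using the public HBase client API; the row, family, and qualifier names are taken from the log, while the cell value and the retry setting are arbitrary example choices, not recommendations.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriterExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Retry knob consulted by the client's retrying caller (visible above as
        // "tries=7, retries=16"); the value here is just an example.
        conf.setInt("hbase.client.retries.number", 16);
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            // Table.put retries internally: a RegionTooBusyException from the
            // server's memstore guard is retried with backoff until the call's
            // deadline, as the "Call exception, tries=..." entries show.
            table.put(put);
        }
    }
}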
2024-11-22T19:23:45,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-11-22T19:23:45,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-11-22T19:23:45,500 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-11-22T19:23:45,500 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 640 msec 2024-11-22T19:23:45,502 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 644 msec 2024-11-22T19:23:45,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:45,672 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 842a4f99d1015855e3e2b86470e8d61b 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-22T19:23:45,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=A 2024-11-22T19:23:45,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:45,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=B 2024-11-22T19:23:45,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:45,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=C 2024-11-22T19:23:45,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:45,678 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122a9cb2aa371874279830b06877dc83aa4_842a4f99d1015855e3e2b86470e8d61b is 50, key is test_row_0/A:col10/1732303425671/Put/seqid=0 2024-11-22T19:23:45,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742341_1517 (size=14994) 2024-11-22T19:23:45,722 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:45,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303485714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:45,722 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:45,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303485715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:45,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:45,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303485823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:45,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:45,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303485823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:45,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-22T19:23:45,962 INFO [Thread-1947 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-11-22T19:23:45,964 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:23:45,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-11-22T19:23:45,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-22T19:23:45,965 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:23:45,966 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:23:45,966 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:23:46,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:46,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303486028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:46,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:46,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303486028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:46,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-22T19:23:46,082 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:46,085 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122a9cb2aa371874279830b06877dc83aa4_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a9cb2aa371874279830b06877dc83aa4_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:46,086 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/b548675bfb614d4a8a4e43dcbf78ec7a, store: [table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:46,087 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/b548675bfb614d4a8a4e43dcbf78ec7a is 175, key is test_row_0/A:col10/1732303425671/Put/seqid=0 2024-11-22T19:23:46,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742342_1518 (size=39949) 2024-11-22T19:23:46,117 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:46,117 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-22T19:23:46,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
2024-11-22T19:23:46,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:46,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:46,118 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:46,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:23:46,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:46,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-22T19:23:46,270 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:46,270 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-22T19:23:46,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
2024-11-22T19:23:46,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:46,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:46,271 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:46,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:23:46,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:46,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:46,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303486336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:46,343 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:46,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303486337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:46,423 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:46,423 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-22T19:23:46,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
2024-11-22T19:23:46,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. as already flushing 2024-11-22T19:23:46,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:46,423 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:46,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:23:46,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
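[Editor's note] The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the region server refusing new mutations while the region's pending memstore data sits above its write-blocking threshold; the stock client retries these internally, which is why the same callers reappear with later deadlines. By default that threshold is the configured flush size multiplied by the block multiplier, so the 512 K figure suggests the test deliberately runs with very small flush settings. A minimal sketch of how the two settings combine, using the shipped defaults as fallbacks (illustrative only, not the test's configuration):

```java
// Sketch: how the write-blocking limit reported as "Over memstore limit=..." is derived.
// Values read here are whatever the local configuration provides; defaults are the shipped ones.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Size at which a region's memstore is flushed to disk (default 128 MB).
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Multiplier beyond which further writes are rejected with RegionTooBusyException (default 4).
    int blockMultiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // Roughly the threshold the region server enforces in the warnings above.
    System.out.println("write-blocking limit ~= " + (flushSize * blockMultiplier) + " bytes");
  }
}
```

Raising the multiplier only postpones the blocking; in a write burst like this one the pressure is relieved when the in-progress flush (and the queued compactions) catch up.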
2024-11-22T19:23:46,491 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=310, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/b548675bfb614d4a8a4e43dcbf78ec7a 2024-11-22T19:23:46,497 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/757aefe9eae14938b632f2def8c0defa is 50, key is test_row_0/B:col10/1732303425671/Put/seqid=0 2024-11-22T19:23:46,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742343_1519 (size=12301) 2024-11-22T19:23:46,501 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/757aefe9eae14938b632f2def8c0defa 2024-11-22T19:23:46,508 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/0fce5d116f2a495dadac6ee49526268f is 50, key is test_row_0/C:col10/1732303425671/Put/seqid=0 2024-11-22T19:23:46,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742344_1520 (size=12301) 2024-11-22T19:23:46,515 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/0fce5d116f2a495dadac6ee49526268f 2024-11-22T19:23:46,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/b548675bfb614d4a8a4e43dcbf78ec7a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/b548675bfb614d4a8a4e43dcbf78ec7a 2024-11-22T19:23:46,522 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/b548675bfb614d4a8a4e43dcbf78ec7a, entries=200, sequenceid=310, filesize=39.0 K 2024-11-22T19:23:46,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/757aefe9eae14938b632f2def8c0defa as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/757aefe9eae14938b632f2def8c0defa 2024-11-22T19:23:46,525 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/757aefe9eae14938b632f2def8c0defa, entries=150, sequenceid=310, filesize=12.0 K 2024-11-22T19:23:46,526 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/0fce5d116f2a495dadac6ee49526268f as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/0fce5d116f2a495dadac6ee49526268f 2024-11-22T19:23:46,529 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/0fce5d116f2a495dadac6ee49526268f, entries=150, sequenceid=310, filesize=12.0 K 2024-11-22T19:23:46,530 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 842a4f99d1015855e3e2b86470e8d61b in 858ms, sequenceid=310, compaction requested=true 2024-11-22T19:23:46,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:46,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 842a4f99d1015855e3e2b86470e8d61b:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:23:46,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:46,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 842a4f99d1015855e3e2b86470e8d61b:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:23:46,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:46,530 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 8 store files, 0 compacting, 8 eligible, 16 blocking 2024-11-22T19:23:46,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 842a4f99d1015855e3e2b86470e8d61b:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:23:46,530 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 8 store files, 0 compacting, 8 eligible, 16 blocking 2024-11-22T19:23:46,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:46,532 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 8 files of size 258394 starting at candidate #0 after considering 21 permutations with 21 in ratio 2024-11-22T19:23:46,532 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 842a4f99d1015855e3e2b86470e8d61b/A is initiating minor 
compaction (all files) 2024-11-22T19:23:46,532 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 842a4f99d1015855e3e2b86470e8d61b/A in TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:46,532 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/312b60af6eca401eb665075103dd0722, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/fe291f6fdc054618bec3d6fdb8614891, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/5859095fa363426a93b6f87c273d0d23, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/71e9ee43914f48c2afbd96835075bafe, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/04c97a5553e04c4da5c171f6247395e5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/33d0b9083b054d73925d92cbe76c6b66, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/04f939d489884781a8a7ded414419001, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/b548675bfb614d4a8a4e43dcbf78ec7a] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp, totalSize=252.3 K 2024-11-22T19:23:46,532 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=8 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:46,532 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
files: [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/312b60af6eca401eb665075103dd0722, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/fe291f6fdc054618bec3d6fdb8614891, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/5859095fa363426a93b6f87c273d0d23, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/71e9ee43914f48c2afbd96835075bafe, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/04c97a5553e04c4da5c171f6247395e5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/33d0b9083b054d73925d92cbe76c6b66, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/04f939d489884781a8a7ded414419001, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/b548675bfb614d4a8a4e43dcbf78ec7a] 2024-11-22T19:23:46,533 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 312b60af6eca401eb665075103dd0722, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732303415793 2024-11-22T19:23:46,533 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 8 files of size 98068 starting at candidate #0 after considering 21 permutations with 21 in ratio 2024-11-22T19:23:46,533 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 842a4f99d1015855e3e2b86470e8d61b/B is initiating minor compaction (all files) 2024-11-22T19:23:46,533 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 842a4f99d1015855e3e2b86470e8d61b/B in TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
2024-11-22T19:23:46,533 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/2c58367b66f74f82ac82198022868ac9, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/131b88b53dfe41d6a3c41cfa4c0428e0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9e1645d7b563459bbca67b6b58083476, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/bb8ac63cb8b7432bb2292c01b4cebe46, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/87d7688852894af9a8b460b8236ff3da, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/149f921fdee947b882d4fa438598188f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9d45a1522c1e41c9b4c8fde3510d73a8, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/757aefe9eae14938b632f2def8c0defa] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp, totalSize=95.8 K 2024-11-22T19:23:46,533 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe291f6fdc054618bec3d6fdb8614891, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732303415945 2024-11-22T19:23:46,533 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5859095fa363426a93b6f87c273d0d23, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732303417534 2024-11-22T19:23:46,533 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c58367b66f74f82ac82198022868ac9, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732303415793 2024-11-22T19:23:46,534 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71e9ee43914f48c2afbd96835075bafe, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732303419778 2024-11-22T19:23:46,534 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 131b88b53dfe41d6a3c41cfa4c0428e0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732303415945 2024-11-22T19:23:46,534 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 04c97a5553e04c4da5c171f6247395e5, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732303420930 2024-11-22T19:23:46,534 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e1645d7b563459bbca67b6b58083476, keycount=150, bloomtype=ROW, 
size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732303417534 2024-11-22T19:23:46,534 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting bb8ac63cb8b7432bb2292c01b4cebe46, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732303419778 2024-11-22T19:23:46,534 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 33d0b9083b054d73925d92cbe76c6b66, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1732303421952 2024-11-22T19:23:46,535 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 87d7688852894af9a8b460b8236ff3da, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732303420930 2024-11-22T19:23:46,535 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 04f939d489884781a8a7ded414419001, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1732303423115 2024-11-22T19:23:46,535 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 149f921fdee947b882d4fa438598188f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1732303421952 2024-11-22T19:23:46,535 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b548675bfb614d4a8a4e43dcbf78ec7a, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1732303425331 2024-11-22T19:23:46,535 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d45a1522c1e41c9b4c8fde3510d73a8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1732303423115 2024-11-22T19:23:46,535 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 757aefe9eae14938b632f2def8c0defa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1732303425343 2024-11-22T19:23:46,544 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:46,545 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122124a619dfba142eabce0ea2ca202af33_842a4f99d1015855e3e2b86470e8d61b store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:46,547 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 842a4f99d1015855e3e2b86470e8d61b#B#compaction#443 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:46,548 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/5429cd97f5cf46b2931b6766da4303a1 is 50, key is test_row_0/B:col10/1732303425671/Put/seqid=0 2024-11-22T19:23:46,548 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122124a619dfba142eabce0ea2ca202af33_842a4f99d1015855e3e2b86470e8d61b, store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:46,548 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122124a619dfba142eabce0ea2ca202af33_842a4f99d1015855e3e2b86470e8d61b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:46,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742345_1521 (size=12983) 2024-11-22T19:23:46,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742346_1522 (size=4469) 2024-11-22T19:23:46,552 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 842a4f99d1015855e3e2b86470e8d61b#A#compaction#444 average throughput is 3.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:46,552 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/3bc58ba77626436fb59bc47bb6a89a1d is 175, key is test_row_0/A:col10/1732303425671/Put/seqid=0 2024-11-22T19:23:46,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742347_1523 (size=31937) 2024-11-22T19:23:46,558 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/3bc58ba77626436fb59bc47bb6a89a1d as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/3bc58ba77626436fb59bc47bb6a89a1d 2024-11-22T19:23:46,562 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 8 (all) file(s) in 842a4f99d1015855e3e2b86470e8d61b/A of 842a4f99d1015855e3e2b86470e8d61b into 3bc58ba77626436fb59bc47bb6a89a1d(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
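[Editor's note] The compaction activity around this point (store A just rewritten from 8 files into one, store B in progress, "8 eligible, 16 blocking") is driven by per-store selection limits. The sketch below reads the relevant settings using the shipped defaults as fallbacks and, optionally, requests a major compaction through the Admin API; the connection details are assumptions, and only the table name is taken from the log.

```java
// Sketch only: the selection limits behind the "Selecting compaction from 8 store files,
// 0 compacting, 8 eligible, 16 blocking" entries above, plus an explicit major-compaction
// request. Defaults shown are the usual shipped values; the test may override them.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionSettings {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Minimum/maximum number of store files considered for one minor compaction.
    int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
    int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);
    // Writes to a store are blocked once it accumulates this many files (the "16 blocking" above).
    int blockingFiles = conf.getInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.printf("min=%d max=%d blocking=%d%n", minFiles, maxFiles, blockingFiles);

    // Optionally force a major compaction instead of waiting for the flusher to queue one.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.majorCompact(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```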
2024-11-22T19:23:46,562 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:46,562 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., storeName=842a4f99d1015855e3e2b86470e8d61b/A, priority=8, startTime=1732303426530; duration=0sec 2024-11-22T19:23:46,562 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:46,562 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 842a4f99d1015855e3e2b86470e8d61b:A 2024-11-22T19:23:46,562 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 8 store files, 0 compacting, 8 eligible, 16 blocking 2024-11-22T19:23:46,564 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 8 files of size 98068 starting at candidate #0 after considering 21 permutations with 21 in ratio 2024-11-22T19:23:46,564 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 842a4f99d1015855e3e2b86470e8d61b/C is initiating minor compaction (all files) 2024-11-22T19:23:46,564 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 842a4f99d1015855e3e2b86470e8d61b/C in TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:46,564 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/bea3dc7c75bf46ca9dc00afe0b0c98d2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/38d61b3559e64b578dfab59711abf50e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/ffaccc568ecd4b94a31525c367c1fe54, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/8b83cd90028b48d299a021201b48b2fd, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/c33b35215b754821867c2485bf71be46, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/470ea578d76147e29abaa504ffa066c1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/fa8b0d986a07495787563c7b4667879b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/0fce5d116f2a495dadac6ee49526268f] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp, totalSize=95.8 
K 2024-11-22T19:23:46,564 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting bea3dc7c75bf46ca9dc00afe0b0c98d2, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732303415793 2024-11-22T19:23:46,564 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38d61b3559e64b578dfab59711abf50e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732303415945 2024-11-22T19:23:46,565 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting ffaccc568ecd4b94a31525c367c1fe54, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732303417534 2024-11-22T19:23:46,565 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b83cd90028b48d299a021201b48b2fd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732303419778 2024-11-22T19:23:46,565 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting c33b35215b754821867c2485bf71be46, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732303420930 2024-11-22T19:23:46,566 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 470ea578d76147e29abaa504ffa066c1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1732303421952 2024-11-22T19:23:46,566 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa8b0d986a07495787563c7b4667879b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1732303423115 2024-11-22T19:23:46,566 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0fce5d116f2a495dadac6ee49526268f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1732303425343 2024-11-22T19:23:46,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-22T19:23:46,575 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:46,576 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-22T19:23:46,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
2024-11-22T19:23:46,576 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing 842a4f99d1015855e3e2b86470e8d61b 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-22T19:23:46,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=A 2024-11-22T19:23:46,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:46,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=B 2024-11-22T19:23:46,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:46,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=C 2024-11-22T19:23:46,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:46,578 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 842a4f99d1015855e3e2b86470e8d61b#C#compaction#445 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:46,579 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/6c4901fc0131412c93747ce083bca01e is 50, key is test_row_0/C:col10/1732303425671/Put/seqid=0 2024-11-22T19:23:46,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122da109f951db441378382cda54776591b_842a4f99d1015855e3e2b86470e8d61b is 50, key is test_row_0/A:col10/1732303425713/Put/seqid=0 2024-11-22T19:23:46,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742348_1524 (size=12983) 2024-11-22T19:23:46,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742349_1525 (size=12454) 2024-11-22T19:23:46,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:46,846 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
as already flushing 2024-11-22T19:23:46,925 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:46,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303486917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:46,930 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:46,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303486924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:46,955 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/5429cd97f5cf46b2931b6766da4303a1 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/5429cd97f5cf46b2931b6766da4303a1 2024-11-22T19:23:46,959 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 8 (all) file(s) in 842a4f99d1015855e3e2b86470e8d61b/B of 842a4f99d1015855e3e2b86470e8d61b into 5429cd97f5cf46b2931b6766da4303a1(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:23:46,959 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:46,959 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., storeName=842a4f99d1015855e3e2b86470e8d61b/B, priority=8, startTime=1732303426530; duration=0sec 2024-11-22T19:23:46,959 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:46,959 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 842a4f99d1015855e3e2b86470e8d61b:B 2024-11-22T19:23:46,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:46,992 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/6c4901fc0131412c93747ce083bca01e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/6c4901fc0131412c93747ce083bca01e 2024-11-22T19:23:46,993 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122da109f951db441378382cda54776591b_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122da109f951db441378382cda54776591b_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:46,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/585a1219689b44cfabe90a6084f84149, store: [table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:46,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/585a1219689b44cfabe90a6084f84149 is 175, key is test_row_0/A:col10/1732303425713/Put/seqid=0 2024-11-22T19:23:46,998 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 8 (all) file(s) in 842a4f99d1015855e3e2b86470e8d61b/C of 842a4f99d1015855e3e2b86470e8d61b into 6c4901fc0131412c93747ce083bca01e(size=12.7 K), total size for store is 12.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:23:46,998 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:46,998 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., storeName=842a4f99d1015855e3e2b86470e8d61b/C, priority=8, startTime=1732303426530; duration=0sec 2024-11-22T19:23:46,998 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:46,998 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 842a4f99d1015855e3e2b86470e8d61b:C 2024-11-22T19:23:46,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742350_1526 (size=31255) 2024-11-22T19:23:46,999 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=323, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/585a1219689b44cfabe90a6084f84149 2024-11-22T19:23:47,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/7ec4b18e9a6e46a389ecb770b23a2f6c is 50, key is test_row_0/B:col10/1732303425713/Put/seqid=0 2024-11-22T19:23:47,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742351_1527 (size=12301) 2024-11-22T19:23:47,019 DEBUG [Thread-1948 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0801ba40 to 127.0.0.1:57120 2024-11-22T19:23:47,019 DEBUG [Thread-1948 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:23:47,021 DEBUG [Thread-1952 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e3203d9 to 127.0.0.1:57120 2024-11-22T19:23:47,021 DEBUG [Thread-1952 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:23:47,022 DEBUG [Thread-1956 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7284f16d to 127.0.0.1:57120 2024-11-22T19:23:47,022 DEBUG [Thread-1956 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:23:47,023 DEBUG [Thread-1950 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x27539bdc to 127.0.0.1:57120 2024-11-22T19:23:47,023 DEBUG [Thread-1950 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:23:47,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:47,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303487026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:47,027 DEBUG [Thread-1954 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x798e7fd4 to 127.0.0.1:57120 2024-11-22T19:23:47,027 DEBUG [Thread-1954 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:23:47,032 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:47,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303487031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:47,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-22T19:23:47,227 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:47,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303487227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:47,234 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:47,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303487234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:47,409 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/7ec4b18e9a6e46a389ecb770b23a2f6c 2024-11-22T19:23:47,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/ec8fb8edc62c4213964dc12942ea9490 is 50, key is test_row_0/C:col10/1732303425713/Put/seqid=0 2024-11-22T19:23:47,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742352_1528 (size=12301) 2024-11-22T19:23:47,529 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:47,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41180 deadline: 1732303487528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:47,537 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:47,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41272 deadline: 1732303487537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:47,819 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/ec8fb8edc62c4213964dc12942ea9490 2024-11-22T19:23:47,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/585a1219689b44cfabe90a6084f84149 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/585a1219689b44cfabe90a6084f84149 2024-11-22T19:23:47,825 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/585a1219689b44cfabe90a6084f84149, entries=150, sequenceid=323, filesize=30.5 K 2024-11-22T19:23:47,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/7ec4b18e9a6e46a389ecb770b23a2f6c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/7ec4b18e9a6e46a389ecb770b23a2f6c 2024-11-22T19:23:47,828 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/7ec4b18e9a6e46a389ecb770b23a2f6c, entries=150, sequenceid=323, filesize=12.0 K 2024-11-22T19:23:47,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/ec8fb8edc62c4213964dc12942ea9490 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/ec8fb8edc62c4213964dc12942ea9490 2024-11-22T19:23:47,831 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/ec8fb8edc62c4213964dc12942ea9490, entries=150, sequenceid=323, filesize=12.0 K 2024-11-22T19:23:47,831 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 842a4f99d1015855e3e2b86470e8d61b in 1255ms, sequenceid=323, compaction requested=false 2024-11-22T19:23:47,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:47,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:47,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-11-22T19:23:47,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-11-22T19:23:47,833 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-11-22T19:23:47,833 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8660 sec 2024-11-22T19:23:47,834 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 1.8690 sec 2024-11-22T19:23:48,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:48,031 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 842a4f99d1015855e3e2b86470e8d61b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T19:23:48,031 DEBUG [Thread-1937 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51f7d511 to 127.0.0.1:57120 2024-11-22T19:23:48,031 DEBUG [Thread-1937 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:23:48,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=A 2024-11-22T19:23:48,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:48,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=B 2024-11-22T19:23:48,032 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:48,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=C 2024-11-22T19:23:48,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:48,037 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411222fbdb71d92a7411d813410006b2c33b8_842a4f99d1015855e3e2b86470e8d61b is 50, key is test_row_0/A:col10/1732303426916/Put/seqid=0 2024-11-22T19:23:48,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742353_1529 (size=12454) 2024-11-22T19:23:48,041 DEBUG [Thread-1939 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1dc42ea6 to 127.0.0.1:57120 2024-11-22T19:23:48,041 DEBUG [Thread-1939 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:23:48,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-22T19:23:48,069 INFO [Thread-1947 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-11-22T19:23:48,440 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:48,443 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411222fbdb71d92a7411d813410006b2c33b8_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411222fbdb71d92a7411d813410006b2c33b8_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:48,444 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/c6d12f0090674a228aee982e0b9c605c, store: [table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:48,444 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/c6d12f0090674a228aee982e0b9c605c is 175, key is test_row_0/A:col10/1732303426916/Put/seqid=0 2024-11-22T19:23:48,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742354_1530 (size=31255) 2024-11-22T19:23:48,848 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=350, memsize=49.2 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/c6d12f0090674a228aee982e0b9c605c 2024-11-22T19:23:48,853 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/d40511676afc487aa4df9246ac596d36 is 50, key is test_row_0/B:col10/1732303426916/Put/seqid=0 2024-11-22T19:23:48,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742355_1531 (size=12301) 2024-11-22T19:23:49,257 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=350 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/d40511676afc487aa4df9246ac596d36 2024-11-22T19:23:49,262 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/36252d27e4384ec1b2264bb6355fb16e is 50, key is test_row_0/C:col10/1732303426916/Put/seqid=0 2024-11-22T19:23:49,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742356_1532 (size=12301) 2024-11-22T19:23:49,666 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=350 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/36252d27e4384ec1b2264bb6355fb16e 2024-11-22T19:23:49,669 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/c6d12f0090674a228aee982e0b9c605c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/c6d12f0090674a228aee982e0b9c605c 2024-11-22T19:23:49,671 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/c6d12f0090674a228aee982e0b9c605c, entries=150, sequenceid=350, filesize=30.5 K 2024-11-22T19:23:49,672 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/d40511676afc487aa4df9246ac596d36 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/d40511676afc487aa4df9246ac596d36 2024-11-22T19:23:49,675 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/d40511676afc487aa4df9246ac596d36, entries=150, 
sequenceid=350, filesize=12.0 K 2024-11-22T19:23:49,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/36252d27e4384ec1b2264bb6355fb16e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/36252d27e4384ec1b2264bb6355fb16e 2024-11-22T19:23:49,677 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/36252d27e4384ec1b2264bb6355fb16e, entries=150, sequenceid=350, filesize=12.0 K 2024-11-22T19:23:49,678 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=6.71 KB/6870 for 842a4f99d1015855e3e2b86470e8d61b in 1647ms, sequenceid=350, compaction requested=true 2024-11-22T19:23:49,678 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:49,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 842a4f99d1015855e3e2b86470e8d61b:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:23:49,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:49,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 842a4f99d1015855e3e2b86470e8d61b:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:23:49,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:49,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 842a4f99d1015855e3e2b86470e8d61b:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:23:49,678 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:49,678 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:49,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:49,679 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94447 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:49,679 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 842a4f99d1015855e3e2b86470e8d61b/A is initiating minor compaction (all files) 2024-11-22T19:23:49,679 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 
37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:49,679 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 842a4f99d1015855e3e2b86470e8d61b/A in TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:49,679 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 842a4f99d1015855e3e2b86470e8d61b/B is initiating minor compaction (all files) 2024-11-22T19:23:49,679 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/3bc58ba77626436fb59bc47bb6a89a1d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/585a1219689b44cfabe90a6084f84149, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/c6d12f0090674a228aee982e0b9c605c] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp, totalSize=92.2 K 2024-11-22T19:23:49,679 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 842a4f99d1015855e3e2b86470e8d61b/B in TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:49,679 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:49,679 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
files: [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/3bc58ba77626436fb59bc47bb6a89a1d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/585a1219689b44cfabe90a6084f84149, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/c6d12f0090674a228aee982e0b9c605c] 2024-11-22T19:23:49,679 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/5429cd97f5cf46b2931b6766da4303a1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/7ec4b18e9a6e46a389ecb770b23a2f6c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/d40511676afc487aa4df9246ac596d36] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp, totalSize=36.7 K 2024-11-22T19:23:49,680 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3bc58ba77626436fb59bc47bb6a89a1d, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1732303425343 2024-11-22T19:23:49,680 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 5429cd97f5cf46b2931b6766da4303a1, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1732303425343 2024-11-22T19:23:49,680 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 585a1219689b44cfabe90a6084f84149, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1732303425699 2024-11-22T19:23:49,680 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ec4b18e9a6e46a389ecb770b23a2f6c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1732303425699 2024-11-22T19:23:49,680 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting c6d12f0090674a228aee982e0b9c605c, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1732303426908 2024-11-22T19:23:49,680 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting d40511676afc487aa4df9246ac596d36, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1732303426908 2024-11-22T19:23:49,686 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:49,686 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 842a4f99d1015855e3e2b86470e8d61b#B#compaction#452 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:49,686 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/7a4196cbb52940e0868f5544d2dbee23 is 50, key is test_row_0/B:col10/1732303426916/Put/seqid=0 2024-11-22T19:23:49,687 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122f98e861db44a41f391642b1c113ae4b9_842a4f99d1015855e3e2b86470e8d61b store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:49,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742357_1533 (size=13085) 2024-11-22T19:23:49,689 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122f98e861db44a41f391642b1c113ae4b9_842a4f99d1015855e3e2b86470e8d61b, store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:49,690 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122f98e861db44a41f391642b1c113ae4b9_842a4f99d1015855e3e2b86470e8d61b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:49,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742358_1534 (size=4469) 2024-11-22T19:23:50,093 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/7a4196cbb52940e0868f5544d2dbee23 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/7a4196cbb52940e0868f5544d2dbee23 2024-11-22T19:23:50,094 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 842a4f99d1015855e3e2b86470e8d61b#A#compaction#453 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:50,094 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/5f00567ac76f42aa958449399bc0ffde is 175, key is test_row_0/A:col10/1732303426916/Put/seqid=0 2024-11-22T19:23:50,097 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 842a4f99d1015855e3e2b86470e8d61b/B of 842a4f99d1015855e3e2b86470e8d61b into 7a4196cbb52940e0868f5544d2dbee23(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:23:50,097 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:50,097 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., storeName=842a4f99d1015855e3e2b86470e8d61b/B, priority=13, startTime=1732303429678; duration=0sec 2024-11-22T19:23:50,097 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:23:50,097 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 842a4f99d1015855e3e2b86470e8d61b:B 2024-11-22T19:23:50,097 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:23:50,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742359_1535 (size=32039) 2024-11-22T19:23:50,098 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:23:50,098 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 842a4f99d1015855e3e2b86470e8d61b/C is initiating minor compaction (all files) 2024-11-22T19:23:50,098 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 842a4f99d1015855e3e2b86470e8d61b/C in TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
2024-11-22T19:23:50,098 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/6c4901fc0131412c93747ce083bca01e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/ec8fb8edc62c4213964dc12942ea9490, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/36252d27e4384ec1b2264bb6355fb16e] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp, totalSize=36.7 K 2024-11-22T19:23:50,098 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c4901fc0131412c93747ce083bca01e, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1732303425343 2024-11-22T19:23:50,099 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting ec8fb8edc62c4213964dc12942ea9490, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1732303425699 2024-11-22T19:23:50,099 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 36252d27e4384ec1b2264bb6355fb16e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1732303426908 2024-11-22T19:23:50,104 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 842a4f99d1015855e3e2b86470e8d61b#C#compaction#454 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:23:50,104 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/4cc6a54e5f95486f8fd5ea6a4381e53b is 50, key is test_row_0/C:col10/1732303426916/Put/seqid=0 2024-11-22T19:23:50,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742360_1536 (size=13085) 2024-11-22T19:23:50,501 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/5f00567ac76f42aa958449399bc0ffde as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/5f00567ac76f42aa958449399bc0ffde 2024-11-22T19:23:50,504 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 842a4f99d1015855e3e2b86470e8d61b/A of 842a4f99d1015855e3e2b86470e8d61b into 5f00567ac76f42aa958449399bc0ffde(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:23:50,504 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:50,504 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., storeName=842a4f99d1015855e3e2b86470e8d61b/A, priority=13, startTime=1732303429678; duration=0sec 2024-11-22T19:23:50,504 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:50,504 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 842a4f99d1015855e3e2b86470e8d61b:A 2024-11-22T19:23:50,510 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/4cc6a54e5f95486f8fd5ea6a4381e53b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/4cc6a54e5f95486f8fd5ea6a4381e53b 2024-11-22T19:23:50,513 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 842a4f99d1015855e3e2b86470e8d61b/C of 842a4f99d1015855e3e2b86470e8d61b into 4cc6a54e5f95486f8fd5ea6a4381e53b(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:23:50,513 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:50,513 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b., storeName=842a4f99d1015855e3e2b86470e8d61b/C, priority=13, startTime=1732303429678; duration=0sec 2024-11-22T19:23:50,513 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:23:50,513 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 842a4f99d1015855e3e2b86470e8d61b:C 2024-11-22T19:23:52,401 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-22T19:23:54,153 DEBUG [Thread-1945 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x31aea41b to 127.0.0.1:57120 2024-11-22T19:23:54,153 DEBUG [Thread-1945 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:23:54,175 DEBUG [Thread-1941 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x117e86d9 to 127.0.0.1:57120 2024-11-22T19:23:54,175 DEBUG [Thread-1941 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:23:54,205 DEBUG [Thread-1943 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6cd96549 to 127.0.0.1:57120 2024-11-22T19:23:54,206 DEBUG [Thread-1943 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:23:54,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-22T19:23:54,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 86 2024-11-22T19:23:54,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 91 2024-11-22T19:23:54,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 39 2024-11-22T19:23:54,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 32 2024-11-22T19:23:54,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 33 2024-11-22T19:23:54,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-22T19:23:54,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-22T19:23:54,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2205 2024-11-22T19:23:54,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6614 rows 2024-11-22T19:23:54,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2209 2024-11-22T19:23:54,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6627 rows 2024-11-22T19:23:54,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2210 2024-11-22T19:23:54,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6630 rows 2024-11-22T19:23:54,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2205 2024-11-22T19:23:54,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6615 rows 2024-11-22T19:23:54,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2199 2024-11-22T19:23:54,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6597 rows 2024-11-22T19:23:54,206 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-22T19:23:54,206 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5fe71801 to 127.0.0.1:57120 2024-11-22T19:23:54,206 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:23:54,208 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-22T19:23:54,208 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-22T19:23:54,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-22T19:23:54,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure 
is done pid=128 2024-11-22T19:23:54,212 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303434211"}]},"ts":"1732303434211"} 2024-11-22T19:23:54,213 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-22T19:23:54,215 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-22T19:23:54,215 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-22T19:23:54,217 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=130, ppid=129, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=842a4f99d1015855e3e2b86470e8d61b, UNASSIGN}] 2024-11-22T19:23:54,217 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=130, ppid=129, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=842a4f99d1015855e3e2b86470e8d61b, UNASSIGN 2024-11-22T19:23:54,218 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=130 updating hbase:meta row=842a4f99d1015855e3e2b86470e8d61b, regionState=CLOSING, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:23:54,219 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T19:23:54,219 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; CloseRegionProcedure 842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657}] 2024-11-22T19:23:54,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-22T19:23:54,370 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:54,370 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] handler.UnassignRegionHandler(124): Close 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:54,370 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-22T19:23:54,371 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(1681): Closing 842a4f99d1015855e3e2b86470e8d61b, disabling compactions & flushes 2024-11-22T19:23:54,371 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:54,371 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 
2024-11-22T19:23:54,371 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. after waiting 0 ms 2024-11-22T19:23:54,371 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:54,371 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(2837): Flushing 842a4f99d1015855e3e2b86470e8d61b 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-22T19:23:54,371 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=A 2024-11-22T19:23:54,371 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:54,371 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=B 2024-11-22T19:23:54,371 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:54,371 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 842a4f99d1015855e3e2b86470e8d61b, store=C 2024-11-22T19:23:54,371 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:54,376 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411223a229fbe32dc4b3ab49c963fa250796d_842a4f99d1015855e3e2b86470e8d61b is 50, key is test_row_0/A:col10/1732303434204/Put/seqid=0 2024-11-22T19:23:54,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742361_1537 (size=12454) 2024-11-22T19:23:54,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-22T19:23:54,780 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:54,783 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411223a229fbe32dc4b3ab49c963fa250796d_842a4f99d1015855e3e2b86470e8d61b to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411223a229fbe32dc4b3ab49c963fa250796d_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:54,784 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/8560cd5a5aa64249b84be2cb7a59f3ae, store: [table=TestAcidGuarantees family=A region=842a4f99d1015855e3e2b86470e8d61b] 2024-11-22T19:23:54,785 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/8560cd5a5aa64249b84be2cb7a59f3ae is 175, key is test_row_0/A:col10/1732303434204/Put/seqid=0 2024-11-22T19:23:54,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742362_1538 (size=31255) 2024-11-22T19:23:54,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-22T19:23:55,188 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=360, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/8560cd5a5aa64249b84be2cb7a59f3ae 2024-11-22T19:23:55,193 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/ce7f58baa5ce4b408e8c175fb313b021 is 50, key is test_row_0/B:col10/1732303434204/Put/seqid=0 2024-11-22T19:23:55,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742363_1539 (size=12301) 2024-11-22T19:23:55,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-22T19:23:55,597 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/ce7f58baa5ce4b408e8c175fb313b021 2024-11-22T19:23:55,602 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/b4c18e3b26844c58bade1b7c1d971320 is 50, key is test_row_0/C:col10/1732303434204/Put/seqid=0 2024-11-22T19:23:55,606 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742364_1540 (size=12301) 2024-11-22T19:23:55,606 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/b4c18e3b26844c58bade1b7c1d971320 2024-11-22T19:23:55,609 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/A/8560cd5a5aa64249b84be2cb7a59f3ae as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/8560cd5a5aa64249b84be2cb7a59f3ae 2024-11-22T19:23:55,611 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/8560cd5a5aa64249b84be2cb7a59f3ae, entries=150, sequenceid=360, filesize=30.5 K 2024-11-22T19:23:55,612 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/B/ce7f58baa5ce4b408e8c175fb313b021 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/ce7f58baa5ce4b408e8c175fb313b021 2024-11-22T19:23:55,614 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/ce7f58baa5ce4b408e8c175fb313b021, entries=150, sequenceid=360, filesize=12.0 K 2024-11-22T19:23:55,615 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/.tmp/C/b4c18e3b26844c58bade1b7c1d971320 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/b4c18e3b26844c58bade1b7c1d971320 2024-11-22T19:23:55,617 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/b4c18e3b26844c58bade1b7c1d971320, entries=150, sequenceid=360, filesize=12.0 K 2024-11-22T19:23:55,618 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize 
~71.02 KB/72720, currentSize=0 B/0 for 842a4f99d1015855e3e2b86470e8d61b in 1247ms, sequenceid=360, compaction requested=false 2024-11-22T19:23:55,619 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/f5258b624a9e40d5846cb3c92ba5f29a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/d8eb3419cdf74f10b3a3c39f654c3da0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/684c292c149a49d2b4fab8329b9bc6ad, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/dd09bcaac20e41fb8b00e6bcd6966edd, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/e1d3b4b174c74493a5422b7932c8529e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/463379ff8cf44ef69b8dd0cc80dc9073, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/c1dd4cf3eadd4a8a8c24f5c1319c4743, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/159ad8fa3986436aa0db8d2e65be0809, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/5eb950d6265a49d8a04b0b4f1180d717, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/8936f744160d46afaaf2935a7e12794e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/e4379ea5b7884450b033c0127ed6a5f6, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/45f8b840345842eeb5c67c910ff1bb49, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/312b60af6eca401eb665075103dd0722, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/fe291f6fdc054618bec3d6fdb8614891, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/5859095fa363426a93b6f87c273d0d23, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/71e9ee43914f48c2afbd96835075bafe, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/04c97a5553e04c4da5c171f6247395e5, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/33d0b9083b054d73925d92cbe76c6b66, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/04f939d489884781a8a7ded414419001, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/b548675bfb614d4a8a4e43dcbf78ec7a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/3bc58ba77626436fb59bc47bb6a89a1d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/585a1219689b44cfabe90a6084f84149, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/c6d12f0090674a228aee982e0b9c605c] to archive 2024-11-22T19:23:55,619 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T19:23:55,621 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/f5258b624a9e40d5846cb3c92ba5f29a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/f5258b624a9e40d5846cb3c92ba5f29a 2024-11-22T19:23:55,621 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/d8eb3419cdf74f10b3a3c39f654c3da0 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/d8eb3419cdf74f10b3a3c39f654c3da0 2024-11-22T19:23:55,622 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/684c292c149a49d2b4fab8329b9bc6ad to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/684c292c149a49d2b4fab8329b9bc6ad 2024-11-22T19:23:55,623 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/dd09bcaac20e41fb8b00e6bcd6966edd to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/dd09bcaac20e41fb8b00e6bcd6966edd 2024-11-22T19:23:55,624 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/e1d3b4b174c74493a5422b7932c8529e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/e1d3b4b174c74493a5422b7932c8529e 2024-11-22T19:23:55,625 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/463379ff8cf44ef69b8dd0cc80dc9073 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/463379ff8cf44ef69b8dd0cc80dc9073 2024-11-22T19:23:55,625 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/c1dd4cf3eadd4a8a8c24f5c1319c4743 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/c1dd4cf3eadd4a8a8c24f5c1319c4743 2024-11-22T19:23:55,626 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/159ad8fa3986436aa0db8d2e65be0809 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/159ad8fa3986436aa0db8d2e65be0809 2024-11-22T19:23:55,627 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/5eb950d6265a49d8a04b0b4f1180d717 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/5eb950d6265a49d8a04b0b4f1180d717 2024-11-22T19:23:55,628 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/8936f744160d46afaaf2935a7e12794e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/8936f744160d46afaaf2935a7e12794e 2024-11-22T19:23:55,628 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/e4379ea5b7884450b033c0127ed6a5f6 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/e4379ea5b7884450b033c0127ed6a5f6 2024-11-22T19:23:55,629 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/45f8b840345842eeb5c67c910ff1bb49 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/45f8b840345842eeb5c67c910ff1bb49 2024-11-22T19:23:55,630 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/312b60af6eca401eb665075103dd0722 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/312b60af6eca401eb665075103dd0722 2024-11-22T19:23:55,631 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/fe291f6fdc054618bec3d6fdb8614891 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/fe291f6fdc054618bec3d6fdb8614891 2024-11-22T19:23:55,631 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/5859095fa363426a93b6f87c273d0d23 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/5859095fa363426a93b6f87c273d0d23 2024-11-22T19:23:55,632 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/71e9ee43914f48c2afbd96835075bafe to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/71e9ee43914f48c2afbd96835075bafe 2024-11-22T19:23:55,633 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/04c97a5553e04c4da5c171f6247395e5 to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/04c97a5553e04c4da5c171f6247395e5 2024-11-22T19:23:55,634 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/33d0b9083b054d73925d92cbe76c6b66 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/33d0b9083b054d73925d92cbe76c6b66 2024-11-22T19:23:55,635 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/04f939d489884781a8a7ded414419001 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/04f939d489884781a8a7ded414419001 2024-11-22T19:23:55,635 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/b548675bfb614d4a8a4e43dcbf78ec7a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/b548675bfb614d4a8a4e43dcbf78ec7a 2024-11-22T19:23:55,636 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/3bc58ba77626436fb59bc47bb6a89a1d to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/3bc58ba77626436fb59bc47bb6a89a1d 2024-11-22T19:23:55,637 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/585a1219689b44cfabe90a6084f84149 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/585a1219689b44cfabe90a6084f84149 2024-11-22T19:23:55,638 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/c6d12f0090674a228aee982e0b9c605c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/c6d12f0090674a228aee982e0b9c605c 2024-11-22T19:23:55,639 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/4f955b6a65a443eab6c632dc2a714ed9, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/a22f677b78eb48c5af6d678d73ce7807, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/b91bbc4967f3418bbdf3058ef2edcd94, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/c7cbfa707e344db0954a2da62c50637b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/a5f9f1bdebba46739aa80d167c46f104, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9a63d66dd9784fba80698bed604ae981, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/cf6977825bab4b6ba9c631c325c420b4, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/cb7a2ad1e1324a8d892c539506ac3592, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9950be96f8f348228b9f068046474350, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/cc7a56d9efe54bd282f524e7cb1675d7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/c7f41e37e9f24787b494e3c7581600c8, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/2c58367b66f74f82ac82198022868ac9, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9592da46d5fd44cfa5f7b3fc9e5aa707, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/131b88b53dfe41d6a3c41cfa4c0428e0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9e1645d7b563459bbca67b6b58083476, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/bb8ac63cb8b7432bb2292c01b4cebe46, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/87d7688852894af9a8b460b8236ff3da, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/149f921fdee947b882d4fa438598188f, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9d45a1522c1e41c9b4c8fde3510d73a8, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/5429cd97f5cf46b2931b6766da4303a1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/757aefe9eae14938b632f2def8c0defa, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/7ec4b18e9a6e46a389ecb770b23a2f6c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/d40511676afc487aa4df9246ac596d36] to archive 2024-11-22T19:23:55,639 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T19:23:55,640 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/4f955b6a65a443eab6c632dc2a714ed9 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/4f955b6a65a443eab6c632dc2a714ed9 2024-11-22T19:23:55,641 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/a22f677b78eb48c5af6d678d73ce7807 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/a22f677b78eb48c5af6d678d73ce7807 2024-11-22T19:23:55,642 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/b91bbc4967f3418bbdf3058ef2edcd94 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/b91bbc4967f3418bbdf3058ef2edcd94 2024-11-22T19:23:55,643 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/c7cbfa707e344db0954a2da62c50637b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/c7cbfa707e344db0954a2da62c50637b 2024-11-22T19:23:55,644 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/a5f9f1bdebba46739aa80d167c46f104 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/a5f9f1bdebba46739aa80d167c46f104 2024-11-22T19:23:55,644 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9a63d66dd9784fba80698bed604ae981 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9a63d66dd9784fba80698bed604ae981 2024-11-22T19:23:55,645 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/cf6977825bab4b6ba9c631c325c420b4 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/cf6977825bab4b6ba9c631c325c420b4 2024-11-22T19:23:55,646 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/cb7a2ad1e1324a8d892c539506ac3592 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/cb7a2ad1e1324a8d892c539506ac3592 2024-11-22T19:23:55,647 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9950be96f8f348228b9f068046474350 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9950be96f8f348228b9f068046474350 2024-11-22T19:23:55,648 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/cc7a56d9efe54bd282f524e7cb1675d7 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/cc7a56d9efe54bd282f524e7cb1675d7 2024-11-22T19:23:55,648 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/c7f41e37e9f24787b494e3c7581600c8 to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/c7f41e37e9f24787b494e3c7581600c8 2024-11-22T19:23:55,649 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/2c58367b66f74f82ac82198022868ac9 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/2c58367b66f74f82ac82198022868ac9 2024-11-22T19:23:55,650 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9592da46d5fd44cfa5f7b3fc9e5aa707 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9592da46d5fd44cfa5f7b3fc9e5aa707 2024-11-22T19:23:55,651 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/131b88b53dfe41d6a3c41cfa4c0428e0 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/131b88b53dfe41d6a3c41cfa4c0428e0 2024-11-22T19:23:55,651 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9e1645d7b563459bbca67b6b58083476 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9e1645d7b563459bbca67b6b58083476 2024-11-22T19:23:55,652 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/bb8ac63cb8b7432bb2292c01b4cebe46 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/bb8ac63cb8b7432bb2292c01b4cebe46 2024-11-22T19:23:55,653 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/87d7688852894af9a8b460b8236ff3da to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/87d7688852894af9a8b460b8236ff3da 2024-11-22T19:23:55,654 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/149f921fdee947b882d4fa438598188f to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/149f921fdee947b882d4fa438598188f 2024-11-22T19:23:55,654 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9d45a1522c1e41c9b4c8fde3510d73a8 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/9d45a1522c1e41c9b4c8fde3510d73a8 2024-11-22T19:23:55,655 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/5429cd97f5cf46b2931b6766da4303a1 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/5429cd97f5cf46b2931b6766da4303a1 2024-11-22T19:23:55,656 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/757aefe9eae14938b632f2def8c0defa to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/757aefe9eae14938b632f2def8c0defa 2024-11-22T19:23:55,657 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/7ec4b18e9a6e46a389ecb770b23a2f6c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/7ec4b18e9a6e46a389ecb770b23a2f6c 2024-11-22T19:23:55,657 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/d40511676afc487aa4df9246ac596d36 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/d40511676afc487aa4df9246ac596d36 2024-11-22T19:23:55,658 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/ba136914ca7348f092beaba699871ec0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/0930dbc67d024977b555abe1368cabfe, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/95ce74e6c6bc4036b99d47f14904b669, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/8c6e2c8e04e142208fd0408d91262b44, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/c9cd5cf8604542ec9584def83108d8f4, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/4143f7a0f8374557a551d5f538481962, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/86d6e62ef9324de6b755e162a5ad49dd, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/5eb6b57f3f7248f8a79f5cb64e368f75, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/04d870671cb5444fa227dedd96b71804, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/e98aa01836c843f98b1cc8775580ff59, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/c943090950b7446cbf7f6c40645af3b7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/bea3dc7c75bf46ca9dc00afe0b0c98d2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/b2bbcc92b3174c57b4bb95b966b09607, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/38d61b3559e64b578dfab59711abf50e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/ffaccc568ecd4b94a31525c367c1fe54, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/8b83cd90028b48d299a021201b48b2fd, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/c33b35215b754821867c2485bf71be46, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/470ea578d76147e29abaa504ffa066c1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/fa8b0d986a07495787563c7b4667879b, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/6c4901fc0131412c93747ce083bca01e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/0fce5d116f2a495dadac6ee49526268f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/ec8fb8edc62c4213964dc12942ea9490, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/36252d27e4384ec1b2264bb6355fb16e] to archive 2024-11-22T19:23:55,659 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T19:23:55,660 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/ba136914ca7348f092beaba699871ec0 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/ba136914ca7348f092beaba699871ec0 2024-11-22T19:23:55,661 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/0930dbc67d024977b555abe1368cabfe to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/0930dbc67d024977b555abe1368cabfe 2024-11-22T19:23:55,662 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/95ce74e6c6bc4036b99d47f14904b669 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/95ce74e6c6bc4036b99d47f14904b669 2024-11-22T19:23:55,662 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/8c6e2c8e04e142208fd0408d91262b44 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/8c6e2c8e04e142208fd0408d91262b44 2024-11-22T19:23:55,663 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/c9cd5cf8604542ec9584def83108d8f4 to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/c9cd5cf8604542ec9584def83108d8f4 2024-11-22T19:23:55,664 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/4143f7a0f8374557a551d5f538481962 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/4143f7a0f8374557a551d5f538481962 2024-11-22T19:23:55,665 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/86d6e62ef9324de6b755e162a5ad49dd to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/86d6e62ef9324de6b755e162a5ad49dd 2024-11-22T19:23:55,665 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/5eb6b57f3f7248f8a79f5cb64e368f75 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/5eb6b57f3f7248f8a79f5cb64e368f75 2024-11-22T19:23:55,666 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/04d870671cb5444fa227dedd96b71804 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/04d870671cb5444fa227dedd96b71804 2024-11-22T19:23:55,667 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/e98aa01836c843f98b1cc8775580ff59 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/e98aa01836c843f98b1cc8775580ff59 2024-11-22T19:23:55,668 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/c943090950b7446cbf7f6c40645af3b7 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/c943090950b7446cbf7f6c40645af3b7 2024-11-22T19:23:55,669 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/bea3dc7c75bf46ca9dc00afe0b0c98d2 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/bea3dc7c75bf46ca9dc00afe0b0c98d2 2024-11-22T19:23:55,669 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/b2bbcc92b3174c57b4bb95b966b09607 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/b2bbcc92b3174c57b4bb95b966b09607 2024-11-22T19:23:55,670 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/38d61b3559e64b578dfab59711abf50e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/38d61b3559e64b578dfab59711abf50e 2024-11-22T19:23:55,671 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/ffaccc568ecd4b94a31525c367c1fe54 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/ffaccc568ecd4b94a31525c367c1fe54 2024-11-22T19:23:55,671 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/8b83cd90028b48d299a021201b48b2fd to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/8b83cd90028b48d299a021201b48b2fd 2024-11-22T19:23:55,672 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/c33b35215b754821867c2485bf71be46 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/c33b35215b754821867c2485bf71be46 2024-11-22T19:23:55,673 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/470ea578d76147e29abaa504ffa066c1 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/470ea578d76147e29abaa504ffa066c1 2024-11-22T19:23:55,674 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/fa8b0d986a07495787563c7b4667879b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/fa8b0d986a07495787563c7b4667879b 2024-11-22T19:23:55,674 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/6c4901fc0131412c93747ce083bca01e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/6c4901fc0131412c93747ce083bca01e 2024-11-22T19:23:55,675 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/0fce5d116f2a495dadac6ee49526268f to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/0fce5d116f2a495dadac6ee49526268f 2024-11-22T19:23:55,676 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/ec8fb8edc62c4213964dc12942ea9490 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/ec8fb8edc62c4213964dc12942ea9490 2024-11-22T19:23:55,677 DEBUG [StoreCloser-TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/36252d27e4384ec1b2264bb6355fb16e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/36252d27e4384ec1b2264bb6355fb16e 2024-11-22T19:23:55,680 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/recovered.edits/363.seqid, newMaxSeqId=363, maxSeqId=4 2024-11-22T19:23:55,681 INFO 
[RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b. 2024-11-22T19:23:55,681 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(1635): Region close journal for 842a4f99d1015855e3e2b86470e8d61b: 2024-11-22T19:23:55,682 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] handler.UnassignRegionHandler(170): Closed 842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:55,682 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=130 updating hbase:meta row=842a4f99d1015855e3e2b86470e8d61b, regionState=CLOSED 2024-11-22T19:23:55,684 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-11-22T19:23:55,684 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; CloseRegionProcedure 842a4f99d1015855e3e2b86470e8d61b, server=a307a1377457,35917,1732303314657 in 1.4640 sec 2024-11-22T19:23:55,685 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=130, resume processing ppid=129 2024-11-22T19:23:55,685 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, ppid=129, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=842a4f99d1015855e3e2b86470e8d61b, UNASSIGN in 1.4670 sec 2024-11-22T19:23:55,686 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-22T19:23:55,686 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4700 sec 2024-11-22T19:23:55,687 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303435686"}]},"ts":"1732303435686"} 2024-11-22T19:23:55,687 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-22T19:23:55,689 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-22T19:23:55,690 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4810 sec 2024-11-22T19:23:56,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-22T19:23:56,314 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-11-22T19:23:56,315 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-22T19:23:56,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:23:56,316 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=132, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:23:56,317 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-22T19:23:56,317 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=132, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:23:56,318 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:56,320 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A, FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B, FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C, FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/recovered.edits] 2024-11-22T19:23:56,322 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/5f00567ac76f42aa958449399bc0ffde to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/5f00567ac76f42aa958449399bc0ffde 2024-11-22T19:23:56,323 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/8560cd5a5aa64249b84be2cb7a59f3ae to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/A/8560cd5a5aa64249b84be2cb7a59f3ae 2024-11-22T19:23:56,324 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/7a4196cbb52940e0868f5544d2dbee23 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/7a4196cbb52940e0868f5544d2dbee23 2024-11-22T19:23:56,325 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/ce7f58baa5ce4b408e8c175fb313b021 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/B/ce7f58baa5ce4b408e8c175fb313b021 2024-11-22T19:23:56,327 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/4cc6a54e5f95486f8fd5ea6a4381e53b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/4cc6a54e5f95486f8fd5ea6a4381e53b 2024-11-22T19:23:56,327 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/b4c18e3b26844c58bade1b7c1d971320 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/b4c18e3b26844c58bade1b7c1d971320 2024-11-22T19:23:56,330 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/recovered.edits/363.seqid to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/recovered.edits/363.seqid 2024-11-22T19:23:56,330 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:56,330 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-22T19:23:56,330 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-22T19:23:56,331 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-22T19:23:56,333 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122045a1cc2d58b4c85b9e87e65de85a574_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122045a1cc2d58b4c85b9e87e65de85a574_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:56,334 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411222649e3cda78e406295f2042e16320328_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411222649e3cda78e406295f2042e16320328_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:56,335 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411222c06254588c545b78d43f0c43117ab99_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411222c06254588c545b78d43f0c43117ab99_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:56,336 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411222fbdb71d92a7411d813410006b2c33b8_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411222fbdb71d92a7411d813410006b2c33b8_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:56,337 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411223a229fbe32dc4b3ab49c963fa250796d_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411223a229fbe32dc4b3ab49c963fa250796d_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:56,337 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411223dfaa1867ff847bc8c10f6a3da117867_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411223dfaa1867ff847bc8c10f6a3da117867_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:56,338 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411226304b83ebf4142e4aa21614373cde96e_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411226304b83ebf4142e4aa21614373cde96e_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:56,339 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112268248b355beb499a8cd2a72744a49e26_842a4f99d1015855e3e2b86470e8d61b to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112268248b355beb499a8cd2a72744a49e26_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:56,340 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411227a406c00ff2540a9b843bab7f1858bc0_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411227a406c00ff2540a9b843bab7f1858bc0_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:56,341 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411227dfa3c9a62dd4f5c9db88969556ffda6_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411227dfa3c9a62dd4f5c9db88969556ffda6_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:56,342 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112286301cd036b547f1b20b05dee738d5b6_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112286301cd036b547f1b20b05dee738d5b6_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:56,343 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122864846e6b90341ee87ccb5af9240cecb_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122864846e6b90341ee87ccb5af9240cecb_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:56,343 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411228d0fe4c542b4445696ca7e0567eb655c_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411228d0fe4c542b4445696ca7e0567eb655c_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:56,344 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a3650cc9a6b842a8a94ee804c73facf5_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a3650cc9a6b842a8a94ee804c73facf5_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:56,345 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a4b8f03535a545dcae7cd67d70cc655b_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a4b8f03535a545dcae7cd67d70cc655b_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:56,346 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a9cb2aa371874279830b06877dc83aa4_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a9cb2aa371874279830b06877dc83aa4_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:56,347 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122acee0e57b3b54930b03bc953a87c32d9_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122acee0e57b3b54930b03bc953a87c32d9_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:56,347 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122c3b8930ce74e4be198d23a2fee5fe819_842a4f99d1015855e3e2b86470e8d61b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122c3b8930ce74e4be198d23a2fee5fe819_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:56,348 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122da109f951db441378382cda54776591b_842a4f99d1015855e3e2b86470e8d61b to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122da109f951db441378382cda54776591b_842a4f99d1015855e3e2b86470e8d61b 2024-11-22T19:23:56,349 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-22T19:23:56,350 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=132, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:23:56,352 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-22T19:23:56,353 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-22T19:23:56,354 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=132, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:23:56,354 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-22T19:23:56,354 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732303436354"}]},"ts":"9223372036854775807"} 2024-11-22T19:23:56,355 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-22T19:23:56,355 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 842a4f99d1015855e3e2b86470e8d61b, NAME => 'TestAcidGuarantees,,1732303403914.842a4f99d1015855e3e2b86470e8d61b.', STARTKEY => '', ENDKEY => ''}] 2024-11-22T19:23:56,356 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
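
The repeated backup.HFileArchiver(596) records above all follow one path convention: a store file under <root>/data/<namespace>/<table>/<region>/<family>/<file> is re-created at the parallel location under <root>/archive/data/... before the source is removed. A minimal standalone sketch of that mapping, useful only for reading the log; the class and method names here are hypothetical illustrations, not HBase API:

// Standalone illustration of the data/ -> archive/data/ path convention seen in the
// HFileArchiver log entries above. Not HBase code; names are hypothetical.
public final class ArchivePathSketch {

    /** Maps a store-file path under <root>/data/... to its archive location under <root>/archive/data/... */
    static String toArchiveLocation(String rootDir, String storeFilePath) {
        String dataPrefix = rootDir + "/data/";
        if (!storeFilePath.startsWith(dataPrefix)) {
            throw new IllegalArgumentException("Not under " + dataPrefix + ": " + storeFilePath);
        }
        // Keep the namespace/table/region/family/file suffix unchanged; only insert "archive/".
        return rootDir + "/archive/data/" + storeFilePath.substring(dataPrefix.length());
    }

    public static void main(String[] args) {
        String root = "hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982";
        String src = root + "/data/default/TestAcidGuarantees/842a4f99d1015855e3e2b86470e8d61b/C/ba136914ca7348f092beaba699871ec0";
        // Prints the same archive destination that appears in the corresponding log entry above.
        System.out.println(toArchiveLocation(root, src));
    }
}
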
2024-11-22T19:23:56,356 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732303436356"}]},"ts":"9223372036854775807"} 2024-11-22T19:23:56,357 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-22T19:23:56,359 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=132, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:23:56,359 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 44 msec 2024-11-22T19:23:56,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-22T19:23:56,418 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-22T19:23:56,427 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=236 (was 237), OpenFileDescriptor=453 (was 453), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=533 (was 723), ProcessCount=11 (was 11), AvailableMemoryMB=4815 (was 4107) - AvailableMemoryMB LEAK? - 2024-11-22T19:23:56,436 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=236, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=533, ProcessCount=11, AvailableMemoryMB=4815 2024-11-22T19:23:56,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
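
The teardown of the previous test is visible above: procId 128 (Operation: DISABLE) and procId 132 (Operation: DELETE) for default:TestAcidGuarantees both report completed before the next test begins. A minimal sketch of driving the same pair of operations through the HBase 2.x Admin client; the connection setup is assumed and error handling is omitted:

// Sketch of the client-side disable + delete that the log above reports as
// procId 128 (DISABLE) and procId 132 (DELETE). Assumes default client configuration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class DropTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            if (admin.tableExists(table)) {
                admin.disableTable(table);  // corresponds to the DisableTableProcedure above
                admin.deleteTable(table);   // corresponds to the DeleteTableProcedure above
            }
        }
    }
}
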
2024-11-22T19:23:56,437 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T19:23:56,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=133, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-22T19:23:56,439 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=133, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T19:23:56,439 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:56,439 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 133 2024-11-22T19:23:56,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=133 2024-11-22T19:23:56,440 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=133, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T19:23:56,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742365_1541 (size=963) 2024-11-22T19:23:56,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=133 2024-11-22T19:23:56,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=133 2024-11-22T19:23:56,846 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982 2024-11-22T19:23:56,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742366_1542 (size=53) 2024-11-22T19:23:57,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=133 2024-11-22T19:23:57,251 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T19:23:57,251 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 6827bb75511a85c0992cc4b6522bf5f1, disabling compactions & flushes 2024-11-22T19:23:57,251 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:23:57,251 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:23:57,251 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. after waiting 0 ms 2024-11-22T19:23:57,251 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:23:57,251 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
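
The create request and region-init records above spell out the table descriptor for the new test: table-level metadata 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' plus column families A, B and C with VERSIONS => '1' and BLOCKSIZE => '65536'. A minimal sketch of building an equivalent descriptor with the HBase 2.x builder API; connection setup is assumed and all other family attributes are left at their defaults:

// Sketch of recreating the descriptor shown in the create/region-init log entries above:
// ADAPTIVE in-memory compaction at table level, families A/B/C with max 1 version.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableDescriptorBuilder table = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    // Table-level metadata from the log: ADAPTIVE compacting memstore.
                    .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
            for (String family : new String[] {"A", "B", "C"}) {
                table.setColumnFamily(ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes(family))
                        .setMaxVersions(1)        // VERSIONS => '1'
                        .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536'
                        .build());
            }
            admin.createTable(table.build());
        }
    }
}
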
2024-11-22T19:23:57,251 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:23:57,252 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=133, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T19:23:57,252 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732303437252"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732303437252"}]},"ts":"1732303437252"} 2024-11-22T19:23:57,253 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-22T19:23:57,254 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=133, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T19:23:57,254 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303437254"}]},"ts":"1732303437254"} 2024-11-22T19:23:57,255 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-22T19:23:57,258 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=134, ppid=133, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6827bb75511a85c0992cc4b6522bf5f1, ASSIGN}] 2024-11-22T19:23:57,258 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=134, ppid=133, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6827bb75511a85c0992cc4b6522bf5f1, ASSIGN 2024-11-22T19:23:57,259 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=134, ppid=133, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=6827bb75511a85c0992cc4b6522bf5f1, ASSIGN; state=OFFLINE, location=a307a1377457,35917,1732303314657; forceNewPlan=false, retain=false 2024-11-22T19:23:57,409 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=134 updating hbase:meta row=6827bb75511a85c0992cc4b6522bf5f1, regionState=OPENING, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:23:57,410 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; OpenRegionProcedure 6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657}] 2024-11-22T19:23:57,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=133 2024-11-22T19:23:57,562 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:57,564 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
2024-11-22T19:23:57,564 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7285): Opening region: {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} 2024-11-22T19:23:57,565 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:23:57,565 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T19:23:57,565 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7327): checking encryption for 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:23:57,565 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7330): checking classloading for 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:23:57,566 INFO [StoreOpener-6827bb75511a85c0992cc4b6522bf5f1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:23:57,567 INFO [StoreOpener-6827bb75511a85c0992cc4b6522bf5f1-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:23:57,567 INFO [StoreOpener-6827bb75511a85c0992cc4b6522bf5f1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6827bb75511a85c0992cc4b6522bf5f1 columnFamilyName A 2024-11-22T19:23:57,567 DEBUG [StoreOpener-6827bb75511a85c0992cc4b6522bf5f1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:57,568 INFO [StoreOpener-6827bb75511a85c0992cc4b6522bf5f1-1 {}] regionserver.HStore(327): Store=6827bb75511a85c0992cc4b6522bf5f1/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:23:57,568 INFO [StoreOpener-6827bb75511a85c0992cc4b6522bf5f1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:23:57,569 INFO [StoreOpener-6827bb75511a85c0992cc4b6522bf5f1-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:23:57,569 INFO [StoreOpener-6827bb75511a85c0992cc4b6522bf5f1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6827bb75511a85c0992cc4b6522bf5f1 columnFamilyName B 2024-11-22T19:23:57,569 DEBUG [StoreOpener-6827bb75511a85c0992cc4b6522bf5f1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:57,569 INFO [StoreOpener-6827bb75511a85c0992cc4b6522bf5f1-1 {}] regionserver.HStore(327): Store=6827bb75511a85c0992cc4b6522bf5f1/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:23:57,569 INFO [StoreOpener-6827bb75511a85c0992cc4b6522bf5f1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:23:57,570 INFO [StoreOpener-6827bb75511a85c0992cc4b6522bf5f1-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:23:57,570 INFO [StoreOpener-6827bb75511a85c0992cc4b6522bf5f1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6827bb75511a85c0992cc4b6522bf5f1 columnFamilyName C 2024-11-22T19:23:57,570 DEBUG [StoreOpener-6827bb75511a85c0992cc4b6522bf5f1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:23:57,570 INFO [StoreOpener-6827bb75511a85c0992cc4b6522bf5f1-1 {}] regionserver.HStore(327): Store=6827bb75511a85c0992cc4b6522bf5f1/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:23:57,571 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:23:57,571 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:23:57,571 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:23:57,572 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T19:23:57,573 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1085): writing seq id for 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:23:57,575 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T19:23:57,575 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1102): Opened 6827bb75511a85c0992cc4b6522bf5f1; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72385805, jitterRate=0.07863254845142365}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T19:23:57,576 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1001): Region open journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:23:57,576 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., pid=135, masterSystemTime=1732303437561 2024-11-22T19:23:57,577 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:23:57,577 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
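The store-opener lines above record how each of the three families was materialised: a CompactingMemStore with a 2.00 MB in-memory flush threshold and the ADAPTIVE compactor, the default cache config, and a FlushLargeStoresPolicy that falls back to memStoreFlushHeapSize divided by the number of families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the table descriptor. As a hedged illustration only, this Java sketch shows how a descriptor producing that kind of store could be declared with the public HBase 2.x builder API; the class name is invented, and the 16 MB value simply mirrors the flushSizeLowerBound=16777216 reported above rather than anything taken from the test's real descriptor.

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CompactingMemStoreDescriptorSketch {
      static TableDescriptor build() {
        TableDescriptorBuilder table =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Optional override of the per-family flush lower bound named in the
                // DEBUG line above; when absent, the policy falls back to
                // memStoreFlushHeapSize / number of families, as logged.
                .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                    String.valueOf(16 * 1024 * 1024));
        for (String family : new String[] {"A", "B", "C"}) {
          table.setColumnFamily(
              ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                  // Selects CompactingMemStore with the ADAPTIVE compactor, matching
                  // "memstore type=CompactingMemStore ... compactor=ADAPTIVE" above.
                  .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                  .build());
        }
        return table.build();
      }
    }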
2024-11-22T19:23:57,578 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=134 updating hbase:meta row=6827bb75511a85c0992cc4b6522bf5f1, regionState=OPEN, openSeqNum=2, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:23:57,579 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-22T19:23:57,579 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; OpenRegionProcedure 6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 in 169 msec 2024-11-22T19:23:57,581 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=134, resume processing ppid=133 2024-11-22T19:23:57,581 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, ppid=133, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6827bb75511a85c0992cc4b6522bf5f1, ASSIGN in 321 msec 2024-11-22T19:23:57,581 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=133, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T19:23:57,581 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303437581"}]},"ts":"1732303437581"} 2024-11-22T19:23:57,582 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-22T19:23:57,584 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=133, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T19:23:57,585 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1460 sec 2024-11-22T19:23:58,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=133 2024-11-22T19:23:58,543 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 133 completed 2024-11-22T19:23:58,545 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x022a6e9f to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4c60eb7d 2024-11-22T19:23:58,548 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@695c2253, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:23:58,549 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:23:58,550 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40968, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:23:58,551 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T19:23:58,552 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41828, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T19:23:58,553 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x32c12a30 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79b10416 2024-11-22T19:23:58,556 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7177efc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:23:58,556 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5ef40578 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2f142b04 2024-11-22T19:23:58,559 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61d38088, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:23:58,560 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x032bb71c to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@de9f076 2024-11-22T19:23:58,562 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7043f683, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:23:58,563 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06bc0f7c to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4414259d 2024-11-22T19:23:58,565 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b0c2472, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:23:58,566 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b8b6e04 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ed69825 2024-11-22T19:23:58,568 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34b30c39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:23:58,569 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11193a0c to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d672ed2 2024-11-22T19:23:58,571 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f7c40ba, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:23:58,571 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7861b162 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7cf40102 2024-11-22T19:23:58,574 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41b0e7b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:23:58,574 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x154f0f85 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@496fe03f 2024-11-22T19:23:58,577 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f2423f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:23:58,578 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x008a917b to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3652e74d 2024-11-22T19:23:58,580 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@184771cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:23:58,581 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x054c2725 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2405c04e 2024-11-22T19:23:58,583 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76f0408, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:23:58,585 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:23:58,586 DEBUG [hconnection-0x24456e40-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:23:58,586 DEBUG [hconnection-0x2374f718-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:23:58,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-22T19:23:58,586 DEBUG [hconnection-0x58114a51-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:23:58,587 INFO 
[RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40974, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:23:58,587 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40982, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:23:58,587 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40994, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:23:58,587 DEBUG [hconnection-0x5cc6b295-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:23:58,588 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:23:58,588 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41004, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:23:58,588 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:23:58,588 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:23:58,589 DEBUG [hconnection-0x59cf7dd8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:23:58,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-22T19:23:58,590 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41018, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:23:58,592 DEBUG [hconnection-0x6c67357-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:23:58,593 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41022, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:23:58,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:23:58,594 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6827bb75511a85c0992cc4b6522bf5f1 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T19:23:58,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=A 2024-11-22T19:23:58,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:58,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=B 2024-11-22T19:23:58,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-11-22T19:23:58,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=C 2024-11-22T19:23:58,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:58,595 DEBUG [hconnection-0x6b1cf8f7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:23:58,595 DEBUG [hconnection-0xfc4a02c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:23:58,596 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41032, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:23:58,597 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41042, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:23:58,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:58,604 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:58,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303498604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:58,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303498603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:58,604 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:58,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303498604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:58,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:58,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303498604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:58,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:58,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41004 deadline: 1732303498605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:58,611 DEBUG [hconnection-0x3150f53d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:23:58,612 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41044, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:23:58,614 DEBUG [hconnection-0x735d49b1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:23:58,616 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41058, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:23:58,627 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/9e987d4c7c8d461c943e79b49e40d5bb is 50, key is test_row_0/A:col10/1732303438593/Put/seqid=0 2024-11-22T19:23:58,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742367_1543 (size=12001) 2024-11-22T19:23:58,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-22T19:23:58,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:58,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303498705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:58,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:58,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303498705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:58,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:58,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303498706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:58,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:58,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303498706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:58,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:58,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41004 deadline: 1732303498706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:58,741 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:58,741 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-22T19:23:58,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:23:58,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:23:58,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:23:58,742 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:58,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:58,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:58,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-22T19:23:58,894 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:58,894 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-22T19:23:58,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:23:58,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:23:58,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:23:58,894 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:58,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:58,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:58,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:58,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303498907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:58,911 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:58,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303498908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:58,911 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:58,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303498908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:58,911 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:58,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303498908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:58,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:58,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41004 deadline: 1732303498909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:59,038 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/9e987d4c7c8d461c943e79b49e40d5bb 2024-11-22T19:23:59,046 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:59,047 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-22T19:23:59,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:23:59,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:23:59,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
2024-11-22T19:23:59,047 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:59,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:59,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:59,065 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/31f22e06dff8484496ea532b91db8ce7 is 50, key is test_row_0/B:col10/1732303438593/Put/seqid=0 2024-11-22T19:23:59,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742368_1544 (size=12001) 2024-11-22T19:23:59,070 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/31f22e06dff8484496ea532b91db8ce7 2024-11-22T19:23:59,095 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/e6255914b01b4b33917e0b1387d0e291 is 50, key is test_row_0/C:col10/1732303438593/Put/seqid=0 2024-11-22T19:23:59,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742369_1545 (size=12001) 2024-11-22T19:23:59,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-22T19:23:59,199 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:59,200 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-22T19:23:59,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:23:59,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:23:59,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:23:59,200 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:59,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:23:59,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:59,212 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:59,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303499210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:59,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:59,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303499212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:59,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:59,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303499212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:59,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:59,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303499214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:59,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:59,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41004 deadline: 1732303499214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:59,352 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:59,352 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-22T19:23:59,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:23:59,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:23:59,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:23:59,353 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:59,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:59,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:59,500 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/e6255914b01b4b33917e0b1387d0e291 2024-11-22T19:23:59,504 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/9e987d4c7c8d461c943e79b49e40d5bb as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/9e987d4c7c8d461c943e79b49e40d5bb 2024-11-22T19:23:59,505 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:59,505 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-22T19:23:59,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:23:59,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
as already flushing 2024-11-22T19:23:59,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:23:59,506 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:59,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:59,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:23:59,508 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/9e987d4c7c8d461c943e79b49e40d5bb, entries=150, sequenceid=13, filesize=11.7 K 2024-11-22T19:23:59,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/31f22e06dff8484496ea532b91db8ce7 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/31f22e06dff8484496ea532b91db8ce7 2024-11-22T19:23:59,512 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/31f22e06dff8484496ea532b91db8ce7, entries=150, sequenceid=13, filesize=11.7 K 2024-11-22T19:23:59,513 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/e6255914b01b4b33917e0b1387d0e291 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/e6255914b01b4b33917e0b1387d0e291 2024-11-22T19:23:59,516 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/e6255914b01b4b33917e0b1387d0e291, entries=150, sequenceid=13, filesize=11.7 K 2024-11-22T19:23:59,516 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for 6827bb75511a85c0992cc4b6522bf5f1 in 923ms, sequenceid=13, compaction requested=false 2024-11-22T19:23:59,516 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-22T19:23:59,517 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:23:59,658 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:23:59,658 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-22T19:23:59,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:23:59,659 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing 6827bb75511a85c0992cc4b6522bf5f1 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-22T19:23:59,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=A 2024-11-22T19:23:59,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:59,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=B 2024-11-22T19:23:59,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:59,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=C 2024-11-22T19:23:59,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:23:59,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/9beb5a5f64634c0cad50f3a81b1747ef is 50, key is test_row_0/A:col10/1732303438604/Put/seqid=0 2024-11-22T19:23:59,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to 
blk_1073742370_1546 (size=12001) 2024-11-22T19:23:59,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-22T19:23:59,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:23:59,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:23:59,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:59,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41004 deadline: 1732303499722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:59,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:59,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303499723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:59,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:59,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303499725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:59,730 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:59,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303499726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:59,733 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:59,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303499727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:59,830 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:59,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303499828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:59,832 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:59,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303499830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:59,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:59,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303499831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:23:59,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:23:59,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303499834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:00,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:00,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303500032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:00,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:00,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303500034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:00,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:00,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303500035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:00,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:00,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303500038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:00,068 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/9beb5a5f64634c0cad50f3a81b1747ef 2024-11-22T19:24:00,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/e644b2aeb299477793f0d38b811b9ff2 is 50, key is test_row_0/B:col10/1732303438604/Put/seqid=0 2024-11-22T19:24:00,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742371_1547 (size=12001) 2024-11-22T19:24:00,341 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:00,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303500338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:00,341 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:00,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303500338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:00,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:00,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303500339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:00,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:00,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303500343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:00,479 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/e644b2aeb299477793f0d38b811b9ff2 2024-11-22T19:24:00,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/9e0d4a1040574c9cb76b8d6cfd159aca is 50, key is test_row_0/C:col10/1732303438604/Put/seqid=0 2024-11-22T19:24:00,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742372_1548 (size=12001) 2024-11-22T19:24:00,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-22T19:24:00,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:00,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41004 deadline: 1732303500734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:00,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:00,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303500842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:00,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:00,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303500845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:00,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:00,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303500846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:00,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:00,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303500848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:00,893 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/9e0d4a1040574c9cb76b8d6cfd159aca 2024-11-22T19:24:00,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/9beb5a5f64634c0cad50f3a81b1747ef as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/9beb5a5f64634c0cad50f3a81b1747ef 2024-11-22T19:24:00,901 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/9beb5a5f64634c0cad50f3a81b1747ef, entries=150, sequenceid=39, filesize=11.7 K 2024-11-22T19:24:00,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/e644b2aeb299477793f0d38b811b9ff2 as 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/e644b2aeb299477793f0d38b811b9ff2 2024-11-22T19:24:00,905 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/e644b2aeb299477793f0d38b811b9ff2, entries=150, sequenceid=39, filesize=11.7 K 2024-11-22T19:24:00,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/9e0d4a1040574c9cb76b8d6cfd159aca as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/9e0d4a1040574c9cb76b8d6cfd159aca 2024-11-22T19:24:00,909 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/9e0d4a1040574c9cb76b8d6cfd159aca, entries=150, sequenceid=39, filesize=11.7 K 2024-11-22T19:24:00,910 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 6827bb75511a85c0992cc4b6522bf5f1 in 1251ms, sequenceid=39, compaction requested=false 2024-11-22T19:24:00,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:00,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
2024-11-22T19:24:00,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-22T19:24:00,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-22T19:24:00,912 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-22T19:24:00,912 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3230 sec 2024-11-22T19:24:00,913 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 2.3280 sec 2024-11-22T19:24:01,200 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-22T19:24:01,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:24:01,851 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6827bb75511a85c0992cc4b6522bf5f1 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T19:24:01,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=A 2024-11-22T19:24:01,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:01,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=B 2024-11-22T19:24:01,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:01,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=C 2024-11-22T19:24:01,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:01,855 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/0a2178e712fa48efb5d902b9b6859788 is 50, key is test_row_0/A:col10/1732303439724/Put/seqid=0 2024-11-22T19:24:01,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742373_1549 (size=14341) 2024-11-22T19:24:01,860 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/0a2178e712fa48efb5d902b9b6859788 2024-11-22T19:24:01,866 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/b8ef3202a6314068b55c6a6f0b7302aa is 50, key is 
test_row_0/B:col10/1732303439724/Put/seqid=0 2024-11-22T19:24:01,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742374_1550 (size=12001) 2024-11-22T19:24:01,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:01,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303501886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:01,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:01,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303501888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:01,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:01,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303501892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:01,898 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:01,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303501893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:01,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:01,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303501993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:01,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:01,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303501995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:02,000 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:02,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303501999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:02,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:02,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303501999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:02,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:02,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303502197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:02,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:02,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303502198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:02,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:02,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303502201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:02,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:02,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303502202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:02,279 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/b8ef3202a6314068b55c6a6f0b7302aa 2024-11-22T19:24:02,285 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/e5b491d5b02e4f508349b0dd1f5eba27 is 50, key is test_row_0/C:col10/1732303439724/Put/seqid=0 2024-11-22T19:24:02,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742375_1551 (size=12001) 2024-11-22T19:24:02,289 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/e5b491d5b02e4f508349b0dd1f5eba27 2024-11-22T19:24:02,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/0a2178e712fa48efb5d902b9b6859788 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/0a2178e712fa48efb5d902b9b6859788 2024-11-22T19:24:02,296 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/0a2178e712fa48efb5d902b9b6859788, entries=200, sequenceid=50, filesize=14.0 K 2024-11-22T19:24:02,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/b8ef3202a6314068b55c6a6f0b7302aa as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/b8ef3202a6314068b55c6a6f0b7302aa 2024-11-22T19:24:02,299 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/b8ef3202a6314068b55c6a6f0b7302aa, entries=150, sequenceid=50, filesize=11.7 K 2024-11-22T19:24:02,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/e5b491d5b02e4f508349b0dd1f5eba27 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/e5b491d5b02e4f508349b0dd1f5eba27 2024-11-22T19:24:02,303 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/e5b491d5b02e4f508349b0dd1f5eba27, entries=150, sequenceid=50, filesize=11.7 K 2024-11-22T19:24:02,304 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 6827bb75511a85c0992cc4b6522bf5f1 in 453ms, sequenceid=50, compaction requested=true 2024-11-22T19:24:02,304 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:02,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:24:02,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:02,304 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:02,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:24:02,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:02,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:24:02,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:02,304 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:02,306 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:02,307 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 
6827bb75511a85c0992cc4b6522bf5f1/A is initiating minor compaction (all files) 2024-11-22T19:24:02,307 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/A in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:02,307 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/9e987d4c7c8d461c943e79b49e40d5bb, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/9beb5a5f64634c0cad50f3a81b1747ef, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/0a2178e712fa48efb5d902b9b6859788] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=37.4 K 2024-11-22T19:24:02,307 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:02,307 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 6827bb75511a85c0992cc4b6522bf5f1/B is initiating minor compaction (all files) 2024-11-22T19:24:02,307 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/B in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
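The repeated RegionTooBusyException above is HRegion.checkResources refusing writes while the region's memstore sits above its blocking limit (512.0 K for this region); the flush that completes at 19:24:02,304 is what drains it. In stock HBase that limit is the region's memstore flush size times hbase.hregion.memstore.block.multiplier, and the flush size can come from hbase.hregion.memstore.flush.size or from the table descriptor. A minimal Java sketch of settings that would reproduce a 512 K limit follows; the concrete numbers and the class name are illustrative assumptions, not taken from this test's actual setup.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreBlockingSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumption: a 128 KB flush size with a block multiplier of 4 gives the
            // 512.0 K blocking limit reported by HRegion.checkResources above.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long limit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
            // Prints "blocking limit = 512.0 K"
            System.out.println("blocking limit = " + (limit / 1024.0) + " K");
        }
    }

A limit this small means concurrent writers hit the blocking path almost continuously, which is consistent with the steady stream of rejected Mutate calls in this log.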
2024-11-22T19:24:02,307 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/31f22e06dff8484496ea532b91db8ce7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/e644b2aeb299477793f0d38b811b9ff2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/b8ef3202a6314068b55c6a6f0b7302aa] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=35.2 K 2024-11-22T19:24:02,307 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e987d4c7c8d461c943e79b49e40d5bb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732303438591 2024-11-22T19:24:02,307 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 31f22e06dff8484496ea532b91db8ce7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732303438591 2024-11-22T19:24:02,308 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting e644b2aeb299477793f0d38b811b9ff2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732303438603 2024-11-22T19:24:02,308 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9beb5a5f64634c0cad50f3a81b1747ef, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732303438603 2024-11-22T19:24:02,308 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting b8ef3202a6314068b55c6a6f0b7302aa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732303439724 2024-11-22T19:24:02,308 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a2178e712fa48efb5d902b9b6859788, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732303439724 2024-11-22T19:24:02,316 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#A#compaction#467 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:02,316 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/d08372f09feb46afb269ef8c60135180 is 50, key is test_row_0/A:col10/1732303439724/Put/seqid=0 2024-11-22T19:24:02,316 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#B#compaction#468 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:02,317 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/2c4652075cce4406880df8adf008a78d is 50, key is test_row_0/B:col10/1732303439724/Put/seqid=0 2024-11-22T19:24:02,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742376_1552 (size=12104) 2024-11-22T19:24:02,332 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/d08372f09feb46afb269ef8c60135180 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/d08372f09feb46afb269ef8c60135180 2024-11-22T19:24:02,336 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/A of 6827bb75511a85c0992cc4b6522bf5f1 into d08372f09feb46afb269ef8c60135180(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:02,336 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:02,336 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/A, priority=13, startTime=1732303442304; duration=0sec 2024-11-22T19:24:02,336 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:02,336 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:A 2024-11-22T19:24:02,336 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:02,337 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:02,337 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 6827bb75511a85c0992cc4b6522bf5f1/C is initiating minor compaction (all files) 2024-11-22T19:24:02,337 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/C in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
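The compaction selections around this point ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", then ExploringCompactionPolicy picking all 3 files) are driven by the store-level compaction thresholds. A small sketch relating those figures to the usual configuration keys, and requesting the same kind of compaction by hand through the Admin API, follows; the values shown are common defaults and the class name is made up for illustration, not code from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionKnobsSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Minimum number of eligible store files before a minor compaction is
            // selected; matches the "3 eligible" figure in the selection log lines.
            conf.setInt("hbase.hstore.compaction.min", 3);
            // Store file count at which new flushes start blocking; matches the
            // "16 blocking" figure reported by SortedCompactionPolicy.
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);

            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // The compactions in this log were queued automatically by CompactSplit
                // right after the flush; an operator can ask for the same work explicitly.
                // Admin.compact() is asynchronous: it only requests the compaction.
                admin.compact(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }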
2024-11-22T19:24:02,337 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/e6255914b01b4b33917e0b1387d0e291, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/9e0d4a1040574c9cb76b8d6cfd159aca, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/e5b491d5b02e4f508349b0dd1f5eba27] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=35.2 K 2024-11-22T19:24:02,337 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e6255914b01b4b33917e0b1387d0e291, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732303438591 2024-11-22T19:24:02,338 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e0d4a1040574c9cb76b8d6cfd159aca, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732303438603 2024-11-22T19:24:02,338 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e5b491d5b02e4f508349b0dd1f5eba27, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732303439724 2024-11-22T19:24:02,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742377_1553 (size=12104) 2024-11-22T19:24:02,350 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#C#compaction#469 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:02,351 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/13fc68e5e5124ec6bc31147734c31761 is 50, key is test_row_0/C:col10/1732303439724/Put/seqid=0 2024-11-22T19:24:02,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742378_1554 (size=12104) 2024-11-22T19:24:02,358 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/13fc68e5e5124ec6bc31147734c31761 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/13fc68e5e5124ec6bc31147734c31761 2024-11-22T19:24:02,363 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/C of 6827bb75511a85c0992cc4b6522bf5f1 into 13fc68e5e5124ec6bc31147734c31761(size=11.8 K), total size for store is 11.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:02,363 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:02,363 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/C, priority=13, startTime=1732303442304; duration=0sec 2024-11-22T19:24:02,363 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:02,363 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:C 2024-11-22T19:24:02,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:24:02,506 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6827bb75511a85c0992cc4b6522bf5f1 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-22T19:24:02,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=A 2024-11-22T19:24:02,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:02,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=B 2024-11-22T19:24:02,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:02,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=C 2024-11-22T19:24:02,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:02,512 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/ce81e3b114de497a830103edaeea9cdd is 50, key is test_row_0/A:col10/1732303442505/Put/seqid=0 2024-11-22T19:24:02,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742379_1555 (size=14341) 2024-11-22T19:24:02,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:02,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303502512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:02,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:02,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303502513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:02,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:02,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303502514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:02,522 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:02,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303502518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:02,621 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:02,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303502619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:02,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:02,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303502619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:02,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:02,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303502619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:02,626 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:02,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303502623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:02,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-22T19:24:02,700 INFO [Thread-2385 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-22T19:24:02,701 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:24:02,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-22T19:24:02,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-22T19:24:02,703 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:24:02,703 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:24:02,704 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:24:02,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:02,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41004 deadline: 1732303502737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:02,740 DEBUG [Thread-2379 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4135 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., hostname=a307a1377457,35917,1732303314657, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:24:02,754 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/2c4652075cce4406880df8adf008a78d as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/2c4652075cce4406880df8adf008a78d 2024-11-22T19:24:02,758 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/B of 6827bb75511a85c0992cc4b6522bf5f1 into 2c4652075cce4406880df8adf008a78d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:02,758 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:02,758 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/B, priority=13, startTime=1732303442304; duration=0sec 2024-11-22T19:24:02,758 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:02,758 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:B 2024-11-22T19:24:02,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-22T19:24:02,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:02,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303502823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:02,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:02,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303502824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:02,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:02,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303502825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:02,830 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:02,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303502828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:02,855 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:02,855 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-22T19:24:02,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:02,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:02,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:02,856 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:24:02,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:02,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:02,916 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/ce81e3b114de497a830103edaeea9cdd 2024-11-22T19:24:02,923 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/1c53978140ce4681896de4ae709922ce is 50, key is test_row_0/B:col10/1732303442505/Put/seqid=0 2024-11-22T19:24:02,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742380_1556 (size=12001) 2024-11-22T19:24:03,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-22T19:24:03,008 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:03,008 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-22T19:24:03,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:03,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:03,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:03,009 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:03,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:03,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:03,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:03,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303503127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:03,130 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:03,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303503128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:03,130 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:03,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303503129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:03,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:03,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303503131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:03,160 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:03,161 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-22T19:24:03,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:03,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:03,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:03,161 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:03,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:03,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:03,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-22T19:24:03,313 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:03,313 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-22T19:24:03,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:03,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:03,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:03,314 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:24:03,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:03,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:03,327 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/1c53978140ce4681896de4ae709922ce 2024-11-22T19:24:03,333 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/bd8a3c3cb75142e69d0ad2e297a36606 is 50, key is test_row_0/C:col10/1732303442505/Put/seqid=0 2024-11-22T19:24:03,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742381_1557 (size=12001) 2024-11-22T19:24:03,466 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:03,466 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-22T19:24:03,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:03,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:03,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:03,466 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:24:03,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:03,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:03,618 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:03,619 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-22T19:24:03,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:03,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:03,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:03,619 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:03,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:03,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:03,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:03,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303503632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:03,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:03,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:03,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303503634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:03,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303503632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:03,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:03,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303503635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:03,737 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/bd8a3c3cb75142e69d0ad2e297a36606 2024-11-22T19:24:03,740 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/ce81e3b114de497a830103edaeea9cdd as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/ce81e3b114de497a830103edaeea9cdd 2024-11-22T19:24:03,744 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/ce81e3b114de497a830103edaeea9cdd, entries=200, sequenceid=80, filesize=14.0 K 2024-11-22T19:24:03,744 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/1c53978140ce4681896de4ae709922ce as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/1c53978140ce4681896de4ae709922ce 2024-11-22T19:24:03,748 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/1c53978140ce4681896de4ae709922ce, entries=150, sequenceid=80, filesize=11.7 K 2024-11-22T19:24:03,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/bd8a3c3cb75142e69d0ad2e297a36606 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/bd8a3c3cb75142e69d0ad2e297a36606 2024-11-22T19:24:03,752 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/bd8a3c3cb75142e69d0ad2e297a36606, entries=150, sequenceid=80, filesize=11.7 K 2024-11-22T19:24:03,753 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 6827bb75511a85c0992cc4b6522bf5f1 in 1247ms, sequenceid=80, compaction requested=false 2024-11-22T19:24:03,753 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:03,773 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:03,773 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-22T19:24:03,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:03,773 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 6827bb75511a85c0992cc4b6522bf5f1 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-22T19:24:03,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=A 2024-11-22T19:24:03,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:03,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=B 2024-11-22T19:24:03,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:03,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=C 2024-11-22T19:24:03,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:03,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/00333b8e803940e496a50d5442dce0ad is 50, key is test_row_0/A:col10/1732303442511/Put/seqid=0 2024-11-22T19:24:03,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742382_1558 (size=12001) 2024-11-22T19:24:03,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=138 2024-11-22T19:24:04,182 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/00333b8e803940e496a50d5442dce0ad 2024-11-22T19:24:04,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/e8679a54ad6946c19b9f7af0226443b5 is 50, key is test_row_0/B:col10/1732303442511/Put/seqid=0 2024-11-22T19:24:04,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742383_1559 (size=12001) 2024-11-22T19:24:04,592 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/e8679a54ad6946c19b9f7af0226443b5 2024-11-22T19:24:04,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/dc5e95e41a764baba391464374fdd240 is 50, key is test_row_0/C:col10/1732303442511/Put/seqid=0 2024-11-22T19:24:04,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742384_1560 (size=12001) 2024-11-22T19:24:04,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:24:04,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:04,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:04,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303504698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:04,706 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:04,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303504701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:04,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:04,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303504702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:04,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:04,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303504702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:04,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-22T19:24:04,811 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:04,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303504807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:04,811 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:04,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303504807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:04,811 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:04,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303504808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:04,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:04,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303504808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:05,006 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/dc5e95e41a764baba391464374fdd240 2024-11-22T19:24:05,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/00333b8e803940e496a50d5442dce0ad as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/00333b8e803940e496a50d5442dce0ad 2024-11-22T19:24:05,014 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/00333b8e803940e496a50d5442dce0ad, entries=150, sequenceid=89, filesize=11.7 K 2024-11-22T19:24:05,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/e8679a54ad6946c19b9f7af0226443b5 as 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/e8679a54ad6946c19b9f7af0226443b5 2024-11-22T19:24:05,017 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/e8679a54ad6946c19b9f7af0226443b5, entries=150, sequenceid=89, filesize=11.7 K 2024-11-22T19:24:05,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:05,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303505012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:05,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:05,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303505012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:05,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/dc5e95e41a764baba391464374fdd240 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/dc5e95e41a764baba391464374fdd240 2024-11-22T19:24:05,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:05,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303505013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:05,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:05,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303505014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:05,021 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/dc5e95e41a764baba391464374fdd240, entries=150, sequenceid=89, filesize=11.7 K 2024-11-22T19:24:05,021 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 6827bb75511a85c0992cc4b6522bf5f1 in 1248ms, sequenceid=89, compaction requested=true 2024-11-22T19:24:05,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:05,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
2024-11-22T19:24:05,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-22T19:24:05,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-22T19:24:05,023 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-22T19:24:05,023 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3180 sec 2024-11-22T19:24:05,024 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 2.3230 sec 2024-11-22T19:24:05,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:24:05,325 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6827bb75511a85c0992cc4b6522bf5f1 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-22T19:24:05,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=A 2024-11-22T19:24:05,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:05,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=B 2024-11-22T19:24:05,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:05,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=C 2024-11-22T19:24:05,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:05,332 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/32bbdd23ca0c4404abb3eda20ebd7cf2 is 50, key is test_row_0/A:col10/1732303444697/Put/seqid=0 2024-11-22T19:24:05,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:05,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303505327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:05,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:05,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303505330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:05,335 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:05,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303505330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:05,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742385_1561 (size=14341) 2024-11-22T19:24:05,336 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/32bbdd23ca0c4404abb3eda20ebd7cf2 2024-11-22T19:24:05,339 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:05,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303505334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:05,341 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/e675b299ccdd4077b0d090c0f254919d is 50, key is test_row_0/B:col10/1732303444697/Put/seqid=0 2024-11-22T19:24:05,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742386_1562 (size=12001) 2024-11-22T19:24:05,351 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/e675b299ccdd4077b0d090c0f254919d 2024-11-22T19:24:05,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/2b3c7e3f76e84804a0be0fac44b6f9c6 is 50, key is test_row_0/C:col10/1732303444697/Put/seqid=0 2024-11-22T19:24:05,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742387_1563 (size=12001) 2024-11-22T19:24:05,361 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/2b3c7e3f76e84804a0be0fac44b6f9c6 2024-11-22T19:24:05,364 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/32bbdd23ca0c4404abb3eda20ebd7cf2 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/32bbdd23ca0c4404abb3eda20ebd7cf2 2024-11-22T19:24:05,367 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/32bbdd23ca0c4404abb3eda20ebd7cf2, entries=200, sequenceid=118, filesize=14.0 K 
2024-11-22T19:24:05,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/e675b299ccdd4077b0d090c0f254919d as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/e675b299ccdd4077b0d090c0f254919d 2024-11-22T19:24:05,371 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/e675b299ccdd4077b0d090c0f254919d, entries=150, sequenceid=118, filesize=11.7 K 2024-11-22T19:24:05,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/2b3c7e3f76e84804a0be0fac44b6f9c6 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/2b3c7e3f76e84804a0be0fac44b6f9c6 2024-11-22T19:24:05,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/2b3c7e3f76e84804a0be0fac44b6f9c6, entries=150, sequenceid=118, filesize=11.7 K 2024-11-22T19:24:05,375 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 6827bb75511a85c0992cc4b6522bf5f1 in 50ms, sequenceid=118, compaction requested=true 2024-11-22T19:24:05,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:05,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:24:05,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:05,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:24:05,375 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:24:05,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:05,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:24:05,375 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:24:05,375 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:05,376 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52787 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:24:05,376 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:24:05,376 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 6827bb75511a85c0992cc4b6522bf5f1/B is initiating minor compaction (all files) 2024-11-22T19:24:05,376 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 6827bb75511a85c0992cc4b6522bf5f1/A is initiating minor compaction (all files) 2024-11-22T19:24:05,377 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/B in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:05,377 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/A in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:05,377 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/2c4652075cce4406880df8adf008a78d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/1c53978140ce4681896de4ae709922ce, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/e8679a54ad6946c19b9f7af0226443b5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/e675b299ccdd4077b0d090c0f254919d] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=47.0 K 2024-11-22T19:24:05,377 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/d08372f09feb46afb269ef8c60135180, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/ce81e3b114de497a830103edaeea9cdd, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/00333b8e803940e496a50d5442dce0ad, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/32bbdd23ca0c4404abb3eda20ebd7cf2] into 
tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=51.5 K 2024-11-22T19:24:05,377 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d08372f09feb46afb269ef8c60135180, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732303439724 2024-11-22T19:24:05,377 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c4652075cce4406880df8adf008a78d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732303439724 2024-11-22T19:24:05,377 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c53978140ce4681896de4ae709922ce, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732303441891 2024-11-22T19:24:05,377 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce81e3b114de497a830103edaeea9cdd, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732303441881 2024-11-22T19:24:05,378 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting e8679a54ad6946c19b9f7af0226443b5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732303442511 2024-11-22T19:24:05,378 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 00333b8e803940e496a50d5442dce0ad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732303442511 2024-11-22T19:24:05,378 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32bbdd23ca0c4404abb3eda20ebd7cf2, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732303444697 2024-11-22T19:24:05,378 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting e675b299ccdd4077b0d090c0f254919d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732303444697 2024-11-22T19:24:05,386 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#B#compaction#479 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:05,386 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/0ef55204491d45f394ac9cf0f7c7d05e is 50, key is test_row_0/B:col10/1732303444697/Put/seqid=0 2024-11-22T19:24:05,388 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#A#compaction#480 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:05,389 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/3a4fa3f7f84b4295a452428cb61419f7 is 50, key is test_row_0/A:col10/1732303444697/Put/seqid=0 2024-11-22T19:24:05,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742389_1565 (size=12241) 2024-11-22T19:24:05,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742388_1564 (size=12241) 2024-11-22T19:24:05,398 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/3a4fa3f7f84b4295a452428cb61419f7 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/3a4fa3f7f84b4295a452428cb61419f7 2024-11-22T19:24:05,402 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/A of 6827bb75511a85c0992cc4b6522bf5f1 into 3a4fa3f7f84b4295a452428cb61419f7(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:05,402 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:05,402 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/A, priority=12, startTime=1732303445375; duration=0sec 2024-11-22T19:24:05,402 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:05,402 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:A 2024-11-22T19:24:05,402 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:24:05,403 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:24:05,403 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 6827bb75511a85c0992cc4b6522bf5f1/C is initiating minor compaction (all files) 2024-11-22T19:24:05,403 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/C in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
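[Annotation] The entries around 19:24:05,375-05,403 record one full minor-compaction cycle: ExploringCompactionPolicy selects the four eligible HFiles per store, rewrites them into a single ~12.0 K file, and the A, B and C stores are handled in turn by the short/long compaction threads. Purely as an illustration of the API surface involved (not what the test itself does), a minimal Java sketch that drives the same flush-then-compact sequence through the public Admin interface could look like the following; only the table name is taken from the log, everything else (connection setup, polling interval) is assumed boilerplate.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();          // reads hbase-site.xml from the classpath
        TableName table = TableName.valueOf("TestAcidGuarantees"); // table name taken from the log above
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.flush(table);        // force memstores out to HFiles, like the MemStoreFlusher entries above
            admin.majorCompact(table); // ask the region server to compact the resulting store files
            // majorCompact() only queues the request, so poll until the server reports no compaction running.
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(500);
            }
        }
    }
}

The polling loop is the reason this is only a sketch: compaction is asynchronous on the server side, exactly as the CompactSplit "Add compact mark"/"Completed compaction" pairs in the surrounding entries show.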
2024-11-22T19:24:05,403 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/13fc68e5e5124ec6bc31147734c31761, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/bd8a3c3cb75142e69d0ad2e297a36606, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/dc5e95e41a764baba391464374fdd240, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/2b3c7e3f76e84804a0be0fac44b6f9c6] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=47.0 K 2024-11-22T19:24:05,403 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13fc68e5e5124ec6bc31147734c31761, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732303439724 2024-11-22T19:24:05,404 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd8a3c3cb75142e69d0ad2e297a36606, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732303441891 2024-11-22T19:24:05,404 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting dc5e95e41a764baba391464374fdd240, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732303442511 2024-11-22T19:24:05,404 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b3c7e3f76e84804a0be0fac44b6f9c6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732303444697 2024-11-22T19:24:05,412 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#C#compaction#481 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:05,412 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/82bd192230b745efb5cad47ac5cf57eb is 50, key is test_row_0/C:col10/1732303444697/Put/seqid=0 2024-11-22T19:24:05,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742390_1566 (size=12241) 2024-11-22T19:24:05,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:24:05,444 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6827bb75511a85c0992cc4b6522bf5f1 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T19:24:05,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=A 2024-11-22T19:24:05,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:05,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=B 2024-11-22T19:24:05,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:05,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=C 2024-11-22T19:24:05,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:05,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/e3f9cdc067614acf90d95cb4f9f2a279 is 50, key is test_row_0/A:col10/1732303445443/Put/seqid=0 2024-11-22T19:24:05,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742391_1567 (size=16831) 2024-11-22T19:24:05,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:05,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303505478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:05,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:05,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303505479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:05,486 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:05,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303505481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:05,489 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:05,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303505482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:05,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:05,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303505583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:05,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:05,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303505586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:05,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:05,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303505587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:05,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:05,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303505590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:05,795 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:05,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303505788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:05,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:05,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303505790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:05,798 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:05,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303505791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:05,799 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/0ef55204491d45f394ac9cf0f7c7d05e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/0ef55204491d45f394ac9cf0f7c7d05e 2024-11-22T19:24:05,803 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/B of 6827bb75511a85c0992cc4b6522bf5f1 into 0ef55204491d45f394ac9cf0f7c7d05e(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:05,803 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:05,803 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/B, priority=12, startTime=1732303445375; duration=0sec 2024-11-22T19:24:05,803 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:05,803 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:B 2024-11-22T19:24:05,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:05,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303505798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:05,820 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/82bd192230b745efb5cad47ac5cf57eb as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/82bd192230b745efb5cad47ac5cf57eb 2024-11-22T19:24:05,823 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/C of 6827bb75511a85c0992cc4b6522bf5f1 into 82bd192230b745efb5cad47ac5cf57eb(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
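[Annotation] Starting at 19:24:05,482 the RPC handlers reject Mutate calls with RegionTooBusyException because the region's memstore has exceeded its 512.0 K blocking limit while the flush is still in flight; that limit is presumably the test's reduced hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier rather than a production default. As a hedged sketch of how a writer such as the clients on 172.17.0.2 can cope with this, the Java example below retries a single Put with exponential backoff; the row key, family and qualifier (test_row_0, A, col10) come from the log, the retry bounds are invented for the example, and the stock HBase client already performs similar retries internally before surfacing the exception.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100; // invented starting backoff for the sketch
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);   // may be rejected while the memstore is over its blocking limit
                    return;           // write accepted
                } catch (RegionTooBusyException busy) {
                    // Same condition as the WARN entries above: back off and let the flush drain the memstore.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
            throw new IllegalStateException("region still too busy after 5 attempts");
        }
    }
}

Backing off is the appropriate response here: the exception clears once a flush completes and memstore space is freed, as seen after the sequenceid=131 flush finishes at 19:24:06,295, before the next write burst fills the memstore again.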
2024-11-22T19:24:05,823 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:05,823 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/C, priority=12, startTime=1732303445375; duration=0sec 2024-11-22T19:24:05,823 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:05,823 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:C 2024-11-22T19:24:05,854 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/e3f9cdc067614acf90d95cb4f9f2a279 2024-11-22T19:24:05,860 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/c6ea1ab157374cd6a2a8c89d1b234f7b is 50, key is test_row_0/B:col10/1732303445443/Put/seqid=0 2024-11-22T19:24:05,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742392_1568 (size=12101) 2024-11-22T19:24:05,867 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/c6ea1ab157374cd6a2a8c89d1b234f7b 2024-11-22T19:24:05,876 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/48ab642b4f7243b4be78d3c4cfb9a053 is 50, key is test_row_0/C:col10/1732303445443/Put/seqid=0 2024-11-22T19:24:05,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742393_1569 (size=12101) 2024-11-22T19:24:06,099 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:06,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303506097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:06,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:06,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303506099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:06,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:06,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303506100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:06,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:06,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303506108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:06,279 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/48ab642b4f7243b4be78d3c4cfb9a053 2024-11-22T19:24:06,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/e3f9cdc067614acf90d95cb4f9f2a279 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/e3f9cdc067614acf90d95cb4f9f2a279 2024-11-22T19:24:06,286 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/e3f9cdc067614acf90d95cb4f9f2a279, entries=250, sequenceid=131, filesize=16.4 K 2024-11-22T19:24:06,287 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/c6ea1ab157374cd6a2a8c89d1b234f7b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/c6ea1ab157374cd6a2a8c89d1b234f7b 2024-11-22T19:24:06,290 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/c6ea1ab157374cd6a2a8c89d1b234f7b, entries=150, sequenceid=131, filesize=11.8 K 2024-11-22T19:24:06,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/48ab642b4f7243b4be78d3c4cfb9a053 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/48ab642b4f7243b4be78d3c4cfb9a053 2024-11-22T19:24:06,294 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/48ab642b4f7243b4be78d3c4cfb9a053, entries=150, sequenceid=131, filesize=11.8 K 2024-11-22T19:24:06,295 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 6827bb75511a85c0992cc4b6522bf5f1 in 851ms, sequenceid=131, compaction requested=false 2024-11-22T19:24:06,295 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:06,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:24:06,605 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6827bb75511a85c0992cc4b6522bf5f1 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T19:24:06,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=A 2024-11-22T19:24:06,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:06,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=B 2024-11-22T19:24:06,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:06,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=C 2024-11-22T19:24:06,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:06,609 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/d09dd1cfe09b4385baefdd69cf5a86b7 is 50, key is test_row_0/A:col10/1732303446604/Put/seqid=0 2024-11-22T19:24:06,620 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:06,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303506616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:06,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742394_1570 (size=12151) 2024-11-22T19:24:06,624 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:06,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303506620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:06,626 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:06,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303506620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:06,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:06,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303506621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:06,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:06,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303506725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:06,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:06,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303506728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:06,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:06,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303506729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:06,755 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:06,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41004 deadline: 1732303506754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:06,756 DEBUG [Thread-2379 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8151 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., hostname=a307a1377457,35917,1732303314657, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:24:06,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-22T19:24:06,807 INFO [Thread-2385 {}] 
client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-22T19:24:06,808 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:24:06,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-11-22T19:24:06,809 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:24:06,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-22T19:24:06,810 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:24:06,810 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:24:06,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-22T19:24:06,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:06,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303506930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:06,934 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:06,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303506930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:06,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:06,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303506933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:06,961 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:06,962 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-22T19:24:06,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:06,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:06,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:06,962 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:06,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:06,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:07,023 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/d09dd1cfe09b4385baefdd69cf5a86b7 2024-11-22T19:24:07,031 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/c41dde311eb1433aa7ecd9a408ab96d1 is 50, key is test_row_0/B:col10/1732303446604/Put/seqid=0 2024-11-22T19:24:07,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742395_1571 (size=12151) 2024-11-22T19:24:07,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-22T19:24:07,114 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:07,114 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-22T19:24:07,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:07,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:07,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:07,115 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:07,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:07,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:07,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:07,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303507233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:07,240 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:07,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303507237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:07,240 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:07,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303507238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:07,267 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:07,267 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-22T19:24:07,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:07,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:07,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:07,267 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:07,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:07,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:07,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-22T19:24:07,419 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:07,420 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-22T19:24:07,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:07,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:07,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:07,420 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:24:07,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:07,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:07,436 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/c41dde311eb1433aa7ecd9a408ab96d1 2024-11-22T19:24:07,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/4980a83ca0174b658c0da554b4a4d469 is 50, key is test_row_0/C:col10/1732303446604/Put/seqid=0 2024-11-22T19:24:07,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742396_1572 (size=12151) 2024-11-22T19:24:07,572 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:07,573 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-22T19:24:07,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:07,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:07,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:07,573 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:24:07,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:07,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:07,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:07,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303507626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:07,725 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:07,725 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-22T19:24:07,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:07,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:07,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
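The RegionTooBusyException entries ("Over memstore limit=512.0 K") come from HRegion.checkResources rejecting writes while a memstore exceeds its blocking threshold; that threshold is normally the flush size multiplied by the block multiplier, and the small 512 K value suggests the test shrinks it deliberately. The snippet below only illustrates the two configuration knobs involved; the values shown are the stock defaults, not what this test uses:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingConfig {
    public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Memstore size at which a flush is triggered (stock default: 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Writes are rejected with RegionTooBusyException once the memstore grows
        // past flush.size * block.multiplier (stock default multiplier: 4).
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
    }
}
```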
2024-11-22T19:24:07,726 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:07,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:07,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:07,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:07,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303507739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:07,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:07,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303507741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:07,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:07,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303507742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:07,846 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/4980a83ca0174b658c0da554b4a4d469 2024-11-22T19:24:07,850 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/d09dd1cfe09b4385baefdd69cf5a86b7 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/d09dd1cfe09b4385baefdd69cf5a86b7 2024-11-22T19:24:07,853 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/d09dd1cfe09b4385baefdd69cf5a86b7, entries=150, sequenceid=158, filesize=11.9 K 2024-11-22T19:24:07,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/c41dde311eb1433aa7ecd9a408ab96d1 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/c41dde311eb1433aa7ecd9a408ab96d1 2024-11-22T19:24:07,856 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/c41dde311eb1433aa7ecd9a408ab96d1, entries=150, sequenceid=158, filesize=11.9 K 2024-11-22T19:24:07,857 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/4980a83ca0174b658c0da554b4a4d469 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/4980a83ca0174b658c0da554b4a4d469 2024-11-22T19:24:07,860 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/4980a83ca0174b658c0da554b4a4d469, entries=150, sequenceid=158, filesize=11.9 K 2024-11-22T19:24:07,861 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 6827bb75511a85c0992cc4b6522bf5f1 in 1256ms, sequenceid=158, compaction requested=true 2024-11-22T19:24:07,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:07,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:24:07,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:07,861 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:07,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:24:07,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:07,861 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:07,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:24:07,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:07,862 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:07,862 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41223 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:07,862 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 6827bb75511a85c0992cc4b6522bf5f1/B is initiating minor compaction (all files) 2024-11-22T19:24:07,862 DEBUG 
[RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 6827bb75511a85c0992cc4b6522bf5f1/A is initiating minor compaction (all files) 2024-11-22T19:24:07,862 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/B in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:07,862 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/A in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:07,862 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/0ef55204491d45f394ac9cf0f7c7d05e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/c6ea1ab157374cd6a2a8c89d1b234f7b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/c41dde311eb1433aa7ecd9a408ab96d1] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=35.6 K 2024-11-22T19:24:07,862 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/3a4fa3f7f84b4295a452428cb61419f7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/e3f9cdc067614acf90d95cb4f9f2a279, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/d09dd1cfe09b4385baefdd69cf5a86b7] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=40.3 K 2024-11-22T19:24:07,863 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ef55204491d45f394ac9cf0f7c7d05e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732303444697 2024-11-22T19:24:07,863 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a4fa3f7f84b4295a452428cb61419f7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732303444697 2024-11-22T19:24:07,863 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting c6ea1ab157374cd6a2a8c89d1b234f7b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732303445439 2024-11-22T19:24:07,863 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e3f9cdc067614acf90d95cb4f9f2a279, keycount=250, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732303445438 2024-11-22T19:24:07,863 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 
c41dde311eb1433aa7ecd9a408ab96d1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732303445471 2024-11-22T19:24:07,863 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d09dd1cfe09b4385baefdd69cf5a86b7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732303445471 2024-11-22T19:24:07,871 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#A#compaction#488 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:07,872 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#B#compaction#489 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:07,872 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/4912e36db8774a449758a0e43813c869 is 50, key is test_row_0/B:col10/1732303446604/Put/seqid=0 2024-11-22T19:24:07,872 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/b36fe71b6cd44ac99c3f8d11bd695e29 is 50, key is test_row_0/A:col10/1732303446604/Put/seqid=0 2024-11-22T19:24:07,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742397_1573 (size=12493) 2024-11-22T19:24:07,878 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:07,878 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-22T19:24:07,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
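The minor compactions above are queued automatically by CompactSplit after the flush, but the same work can also be requested from a client. A small sketch using the Admin API; the table and family names are taken from the log, everything else is illustrative:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompactionExample {
    public static void main(String[] args) throws Exception {
        try (Connection connection =
                 ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Ask for a compaction of a single column family (store "A" above).
            admin.compact(table, Bytes.toBytes("A"));
            // Or request a major compaction of the whole table. Both calls are
            // asynchronous: the region server just queues the work, much like the
            // CompactSplit "Add compact mark" entries in this log.
            admin.majorCompact(table);
        }
    }
}
```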
2024-11-22T19:24:07,878 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing 6827bb75511a85c0992cc4b6522bf5f1 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-22T19:24:07,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=A 2024-11-22T19:24:07,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:07,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=B 2024-11-22T19:24:07,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:07,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=C 2024-11-22T19:24:07,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:07,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742398_1574 (size=12493) 2024-11-22T19:24:07,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/37686381074b43109ae4eb1a8c1b71a8 is 50, key is test_row_0/A:col10/1732303446619/Put/seqid=0 2024-11-22T19:24:07,888 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/b36fe71b6cd44ac99c3f8d11bd695e29 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/b36fe71b6cd44ac99c3f8d11bd695e29 2024-11-22T19:24:07,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742399_1575 (size=12151) 2024-11-22T19:24:07,891 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/37686381074b43109ae4eb1a8c1b71a8 2024-11-22T19:24:07,893 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/A of 6827bb75511a85c0992cc4b6522bf5f1 into b36fe71b6cd44ac99c3f8d11bd695e29(size=12.2 K), total size for store is 12.2 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:07,893 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:07,893 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/A, priority=13, startTime=1732303447861; duration=0sec 2024-11-22T19:24:07,894 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:07,894 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:A 2024-11-22T19:24:07,894 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:07,896 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:07,896 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 6827bb75511a85c0992cc4b6522bf5f1/C is initiating minor compaction (all files) 2024-11-22T19:24:07,896 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/C in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
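The "in ratio" wording in the ExploringCompactionPolicy lines refers to its size check: a candidate set of store files qualifies when no single file is much larger than the rest. The helper below is only a paraphrase of that idea for illustration, not HBase's actual implementation, and the 1.2 ratio is the commonly cited default rather than a value read from this cluster:

```java
public final class CompactionRatioSketch {
    /**
     * Rough paraphrase of the "files in ratio" test: the selection qualifies when
     * no file is larger than ratio times the combined size of the other files.
     */
    static boolean filesInRatio(long[] fileSizes, double ratio) {
        long total = 0;
        for (long size : fileSizes) {
            total += size;
        }
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Illustrative byte counts for three ~12 K store files (the log reports a
        // 36493-byte total for the B-store selection).
        long[] sizes = {12288, 12054, 12151};
        System.out.println(filesInRatio(sizes, 1.2)); // similar sizes -> true
    }
}
```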
2024-11-22T19:24:07,896 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/82bd192230b745efb5cad47ac5cf57eb, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/48ab642b4f7243b4be78d3c4cfb9a053, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/4980a83ca0174b658c0da554b4a4d469] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=35.6 K 2024-11-22T19:24:07,897 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82bd192230b745efb5cad47ac5cf57eb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732303444697 2024-11-22T19:24:07,897 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 48ab642b4f7243b4be78d3c4cfb9a053, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732303445439 2024-11-22T19:24:07,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/c8ae318e4da7447ca879f2d0635f4476 is 50, key is test_row_0/B:col10/1732303446619/Put/seqid=0 2024-11-22T19:24:07,899 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4980a83ca0174b658c0da554b4a4d469, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732303445471 2024-11-22T19:24:07,904 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#C#compaction#492 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:07,904 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/08c40c43e490482cb492ff336d0b8888 is 50, key is test_row_0/C:col10/1732303446604/Put/seqid=0 2024-11-22T19:24:07,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742401_1577 (size=12493) 2024-11-22T19:24:07,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-22T19:24:07,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742400_1576 (size=12151) 2024-11-22T19:24:08,281 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/4912e36db8774a449758a0e43813c869 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/4912e36db8774a449758a0e43813c869 2024-11-22T19:24:08,285 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/B of 6827bb75511a85c0992cc4b6522bf5f1 into 4912e36db8774a449758a0e43813c869(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:08,285 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:08,285 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/B, priority=13, startTime=1732303447861; duration=0sec 2024-11-22T19:24:08,285 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:08,285 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:B 2024-11-22T19:24:08,312 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/08c40c43e490482cb492ff336d0b8888 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/08c40c43e490482cb492ff336d0b8888 2024-11-22T19:24:08,315 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/C of 6827bb75511a85c0992cc4b6522bf5f1 into 08c40c43e490482cb492ff336d0b8888(size=12.2 K), total size for store is 12.2 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:08,316 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:08,316 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/C, priority=13, startTime=1732303447861; duration=0sec 2024-11-22T19:24:08,316 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:08,316 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:C 2024-11-22T19:24:08,317 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/c8ae318e4da7447ca879f2d0635f4476 2024-11-22T19:24:08,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/3dc40ab4b6d24720abdfd98c08c54025 is 50, key is test_row_0/C:col10/1732303446619/Put/seqid=0 2024-11-22T19:24:08,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742402_1578 (size=12151) 2024-11-22T19:24:08,726 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/3dc40ab4b6d24720abdfd98c08c54025 2024-11-22T19:24:08,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/37686381074b43109ae4eb1a8c1b71a8 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/37686381074b43109ae4eb1a8c1b71a8 2024-11-22T19:24:08,733 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/37686381074b43109ae4eb1a8c1b71a8, entries=150, sequenceid=170, filesize=11.9 K 2024-11-22T19:24:08,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/c8ae318e4da7447ca879f2d0635f4476 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/c8ae318e4da7447ca879f2d0635f4476 2024-11-22T19:24:08,737 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/c8ae318e4da7447ca879f2d0635f4476, entries=150, sequenceid=170, filesize=11.9 K 2024-11-22T19:24:08,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/3dc40ab4b6d24720abdfd98c08c54025 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/3dc40ab4b6d24720abdfd98c08c54025 2024-11-22T19:24:08,741 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/3dc40ab4b6d24720abdfd98c08c54025, entries=150, sequenceid=170, filesize=11.9 K 2024-11-22T19:24:08,742 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=0 B/0 for 6827bb75511a85c0992cc4b6522bf5f1 in 864ms, sequenceid=170, compaction requested=false 2024-11-22T19:24:08,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:08,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
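Once the .tmp files are committed, the flushed and compacted HFiles sit under the region's per-family directories shown in the paths above. A short sketch for listing them straight from HDFS; the namenode address and region directory are copied from the log, the rest is an example rather than part of the test:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListStoreFiles {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Namenode and region directory as they appear in the log above.
        Path regionDir = new Path("hdfs://localhost:44823/user/jenkins/test-data/"
            + "8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/"
            + "6827bb75511a85c0992cc4b6522bf5f1");
        try (FileSystem fs = regionDir.getFileSystem(conf)) {
            for (String family : new String[] {"A", "B", "C"}) {
                // Prints each HFile path and length, e.g. the ~12 K files added above.
                for (FileStatus status : fs.listStatus(new Path(regionDir, family))) {
                    System.out.println(status.getPath() + "  " + status.getLen() + " bytes");
                }
            }
        }
    }
}
```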
2024-11-22T19:24:08,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-22T19:24:08,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-11-22T19:24:08,744 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-22T19:24:08,745 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9330 sec 2024-11-22T19:24:08,746 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 1.9370 sec 2024-11-22T19:24:08,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:24:08,757 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6827bb75511a85c0992cc4b6522bf5f1 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-22T19:24:08,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=A 2024-11-22T19:24:08,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:08,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=B 2024-11-22T19:24:08,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:08,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=C 2024-11-22T19:24:08,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:08,761 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/e56f50ee242b4d82aeba9fe0031b9713 is 50, key is test_row_0/A:col10/1732303448756/Put/seqid=0 2024-11-22T19:24:08,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742403_1579 (size=16931) 2024-11-22T19:24:08,765 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=185 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/e56f50ee242b4d82aeba9fe0031b9713 2024-11-22T19:24:08,771 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/174e88a402be41feaff4195b9413cab5 is 50, key is test_row_0/B:col10/1732303448756/Put/seqid=0 2024-11-22T19:24:08,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to 
blk_1073742404_1580 (size=12151) 2024-11-22T19:24:08,793 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:08,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303508787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:08,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:08,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303508790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:08,797 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:08,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303508793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:08,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:08,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303508894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:08,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:08,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303508895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:08,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:08,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303508898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:08,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-22T19:24:08,914 INFO [Thread-2385 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-22T19:24:08,915 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:24:08,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-11-22T19:24:08,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-22T19:24:08,917 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:24:08,917 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:24:08,917 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:24:09,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=142 2024-11-22T19:24:09,069 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:09,069 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-22T19:24:09,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:09,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:09,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:09,070 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:09,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
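The repeated pid=143 failures above come from FlushRegionCallable finding the region already mid-flush ("NOT flushing ... as already flushing"), so the parent FlushTableProcedure (pid=142) keeps re-dispatching the region flush until the MemStoreFlusher run that is already in progress finishes. A minimal sketch of the client side of such a flush request, assuming a reachable cluster configured via hbase-site.xml; the table name is taken from the log, everything else is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Requests a flush of every region of the table; on this cluster the master
            // drives it as the flush-table / flush-region procedures seen above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}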
2024-11-22T19:24:09,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:09,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:09,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303509098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:09,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:09,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303509100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:09,105 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:09,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303509100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:09,175 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=185 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/174e88a402be41feaff4195b9413cab5 2024-11-22T19:24:09,182 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/ee933004c3cc42ec8ea22833bae22e26 is 50, key is test_row_0/C:col10/1732303448756/Put/seqid=0 2024-11-22T19:24:09,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742405_1581 (size=12151) 2024-11-22T19:24:09,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-22T19:24:09,221 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:09,222 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-22T19:24:09,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:09,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
as already flushing 2024-11-22T19:24:09,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:09,222 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:09,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:09,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:09,374 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:09,374 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-22T19:24:09,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:09,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:09,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:09,375 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:09,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:09,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:09,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:09,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303509406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:09,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:09,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303509406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:09,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:09,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303509407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:09,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-22T19:24:09,527 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:09,527 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-22T19:24:09,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:09,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:09,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:09,527 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:09,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:09,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:09,587 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=185 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/ee933004c3cc42ec8ea22833bae22e26 2024-11-22T19:24:09,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/e56f50ee242b4d82aeba9fe0031b9713 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/e56f50ee242b4d82aeba9fe0031b9713 2024-11-22T19:24:09,594 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/e56f50ee242b4d82aeba9fe0031b9713, entries=250, sequenceid=185, filesize=16.5 K 2024-11-22T19:24:09,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/174e88a402be41feaff4195b9413cab5 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/174e88a402be41feaff4195b9413cab5 2024-11-22T19:24:09,598 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/174e88a402be41feaff4195b9413cab5, entries=150, sequenceid=185, filesize=11.9 K 2024-11-22T19:24:09,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/ee933004c3cc42ec8ea22833bae22e26 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/ee933004c3cc42ec8ea22833bae22e26 2024-11-22T19:24:09,601 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/ee933004c3cc42ec8ea22833bae22e26, entries=150, sequenceid=185, filesize=11.9 K 2024-11-22T19:24:09,602 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 6827bb75511a85c0992cc4b6522bf5f1 in 845ms, sequenceid=185, compaction requested=true 2024-11-22T19:24:09,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:09,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:24:09,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:09,602 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:09,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:24:09,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:09,602 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:09,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:24:09,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:09,603 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:09,603 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41575 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:09,603 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 6827bb75511a85c0992cc4b6522bf5f1/B is initiating minor compaction (all files) 2024-11-22T19:24:09,603 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 6827bb75511a85c0992cc4b6522bf5f1/A is initiating minor compaction (all files) 2024-11-22T19:24:09,603 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/B in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:09,603 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/A in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
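The compaction selection above fires because each store (A, B, and C) now holds three HFiles, the default minimum file count for a minor compaction (hbase.hstore.compaction.min, 3 by default), so the ExploringCompactionPolicy picks all three files per store. The same housekeeping can also be requested explicitly through the Admin API; a hedged sketch against the same table, using standard Admin calls and the same cluster connection assumption as before:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactTableExample {
    public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Ask for a minor compaction; the region server still applies its own
            // selection policy, exactly as in the log above.
            admin.compact(table);
            // A major compaction instead rewrites all store files into one per store.
            admin.majorCompact(table);
        }
    }
}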
2024-11-22T19:24:09,603 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/4912e36db8774a449758a0e43813c869, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/c8ae318e4da7447ca879f2d0635f4476, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/174e88a402be41feaff4195b9413cab5] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=35.9 K 2024-11-22T19:24:09,604 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/b36fe71b6cd44ac99c3f8d11bd695e29, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/37686381074b43109ae4eb1a8c1b71a8, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/e56f50ee242b4d82aeba9fe0031b9713] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=40.6 K 2024-11-22T19:24:09,604 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b36fe71b6cd44ac99c3f8d11bd695e29, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732303445471 2024-11-22T19:24:09,604 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 4912e36db8774a449758a0e43813c869, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732303445471 2024-11-22T19:24:09,604 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 37686381074b43109ae4eb1a8c1b71a8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732303446612 2024-11-22T19:24:09,604 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting c8ae318e4da7447ca879f2d0635f4476, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732303446612 2024-11-22T19:24:09,608 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e56f50ee242b4d82aeba9fe0031b9713, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1732303448751 2024-11-22T19:24:09,608 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 174e88a402be41feaff4195b9413cab5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1732303448751 2024-11-22T19:24:09,619 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#B#compaction#498 average throughput is unlimited, slept 0 time(s) and total 
slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:09,620 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/6f8221221d5b4edea648e4c69837ed85 is 50, key is test_row_0/B:col10/1732303448756/Put/seqid=0 2024-11-22T19:24:09,620 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#A#compaction#497 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:09,620 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/fe322ee00e4f45ec87895e2d85b45de0 is 50, key is test_row_0/A:col10/1732303448756/Put/seqid=0 2024-11-22T19:24:09,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742406_1582 (size=12595) 2024-11-22T19:24:09,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742407_1583 (size=12595) 2024-11-22T19:24:09,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:24:09,647 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6827bb75511a85c0992cc4b6522bf5f1 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T19:24:09,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=A 2024-11-22T19:24:09,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:09,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=B 2024-11-22T19:24:09,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:09,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=C 2024-11-22T19:24:09,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:09,651 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/1319214680cf45beae1d0a38f6ab07fe is 50, key is test_row_0/A:col10/1732303449646/Put/seqid=0 2024-11-22T19:24:09,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742408_1584 (size=12151) 2024-11-22T19:24:09,679 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:09,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-22T19:24:09,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:09,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:09,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:09,680 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:09,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:24:09,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:09,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:09,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303509684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:09,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:09,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303509787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:09,832 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:09,832 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-22T19:24:09,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
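Note on the RegionTooBusyException entries above: the write path refuses new Puts while the region's memstore is over its blocking limit (512.0 K in this test configuration) until the flush in progress drains it. Below is a minimal, hypothetical client-side sketch of backing off and retrying such a Put; it assumes the exception surfaces to the caller, whereas the real HBase client may retry it internally before giving up. Table, row and backoff values are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                       // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break;                                  // write accepted
        } catch (RegionTooBusyException busy) {
          // Region is over its memstore blocking limit; wait for flushes to drain it.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}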
2024-11-22T19:24:09,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:09,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:09,833 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:09,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:24:09,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:09,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:09,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303509911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:09,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:09,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303509913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:09,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:09,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303509915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:09,985 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:09,985 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-22T19:24:09,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:09,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:09,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:09,985 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:09,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:09,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:09,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:09,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303509993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:10,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-22T19:24:10,029 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/6f8221221d5b4edea648e4c69837ed85 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/6f8221221d5b4edea648e4c69837ed85 2024-11-22T19:24:10,029 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/fe322ee00e4f45ec87895e2d85b45de0 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/fe322ee00e4f45ec87895e2d85b45de0 2024-11-22T19:24:10,033 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/B of 6827bb75511a85c0992cc4b6522bf5f1 into 6f8221221d5b4edea648e4c69837ed85(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:10,033 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:10,033 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/B, priority=13, startTime=1732303449602; duration=0sec 2024-11-22T19:24:10,033 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:10,033 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:B 2024-11-22T19:24:10,033 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:10,034 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/A of 6827bb75511a85c0992cc4b6522bf5f1 into fe322ee00e4f45ec87895e2d85b45de0(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
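Note on the "Committing ... as ..." lines above: both flush and compaction outputs are first written under the region's .tmp directory and only then moved into the column-family directory. The sketch below is a hedged, simplified illustration of that rename-based commit using the plain Hadoop FileSystem API; the real HRegionFileSystem code wraps it with validation and bookkeeping, and the paths here are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;

public class CommitHFileSketch {
  // Move a finished HFile from the region's .tmp area into the target
  // column-family directory, as in the "Committing ... as ..." log lines.
  static Path commit(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
    Path dest = new Path(familyDir, tmpFile.getName());
    if (!fs.rename(tmpFile, dest)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + dest);
    }
    return dest;
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    Path tmp = new Path(".tmp/B/6f8221221d5b4edea648e4c69837ed85");  // illustrative path
    Path familyDir = new Path("B");
    commit(fs, tmp, familyDir);
  }
}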
2024-11-22T19:24:10,034 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:10,034 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/A, priority=13, startTime=1732303449602; duration=0sec 2024-11-22T19:24:10,034 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:10,034 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:A 2024-11-22T19:24:10,034 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:10,034 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 6827bb75511a85c0992cc4b6522bf5f1/C is initiating minor compaction (all files) 2024-11-22T19:24:10,034 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/C in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:10,034 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/08c40c43e490482cb492ff336d0b8888, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/3dc40ab4b6d24720abdfd98c08c54025, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/ee933004c3cc42ec8ea22833bae22e26] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=35.9 K 2024-11-22T19:24:10,035 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 08c40c43e490482cb492ff336d0b8888, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732303445471 2024-11-22T19:24:10,035 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 3dc40ab4b6d24720abdfd98c08c54025, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732303446612 2024-11-22T19:24:10,035 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting ee933004c3cc42ec8ea22833bae22e26, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1732303448751 2024-11-22T19:24:10,041 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#C#compaction#500 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:10,042 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/52bc6f8b327240dfb167f52c2accb316 is 50, key is test_row_0/C:col10/1732303448756/Put/seqid=0 2024-11-22T19:24:10,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742409_1585 (size=12595) 2024-11-22T19:24:10,048 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/52bc6f8b327240dfb167f52c2accb316 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/52bc6f8b327240dfb167f52c2accb316 2024-11-22T19:24:10,052 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/C of 6827bb75511a85c0992cc4b6522bf5f1 into 52bc6f8b327240dfb167f52c2accb316(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:10,052 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:10,052 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/C, priority=13, startTime=1732303449602; duration=0sec 2024-11-22T19:24:10,052 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:10,052 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:C 2024-11-22T19:24:10,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/1319214680cf45beae1d0a38f6ab07fe 2024-11-22T19:24:10,064 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/ed4bbe3e868c433590e529a4343a50b5 is 50, key is test_row_0/B:col10/1732303449646/Put/seqid=0 2024-11-22T19:24:10,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742410_1586 (size=12151) 2024-11-22T19:24:10,137 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:10,138 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing 
remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-22T19:24:10,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:10,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:10,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:10,138 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:10,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
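Note on the flush sizes reported in this stretch of the log: the region-level flush started with "3/3 column families, dataSize=147.60 KB", and each store's flush reports 49.20 KB, so the memstore snapshot splits evenly across the three families, 3 x 49.20 KB = 147.60 KB, with one new HFile per store (A, B, C) written under .tmp at sequenceid=210.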
2024-11-22T19:24:10,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:10,290 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:10,290 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-22T19:24:10,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:10,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
as already flushing 2024-11-22T19:24:10,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:10,291 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:10,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:10,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:10,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:10,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303510299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:10,443 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:10,443 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-22T19:24:10,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:10,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:10,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:10,444 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
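Note on the "Over memstore limit=512.0 K" rejections above: they repeat until the in-flight flush releases memstore space. As a hedged illustration (not the actual HRegion.checkResources source), the blocking threshold is commonly derived from the configured flush size and block multiplier, roughly as below; assuming that relationship, the 512 K figure here comes from the test's scaled-down flush size, and the default values in the sketch are the stock HBase defaults, not this test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.RegionTooBusyException;

class MemStoreBlockingSketch {
  // Simplified illustration of the blocking check; the logic is paraphrased from
  // the behaviour visible in the log, not copied from the HBase source.
  static void checkResources(Configuration conf, long memStoreSizeBytes)
      throws RegionTooBusyException {
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier;   // 512.0 K in this test run
    if (memStoreSizeBytes > blockingLimit) {
      throw new RegionTooBusyException("Over memstore limit=" + blockingLimit);
    }
  }
}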
2024-11-22T19:24:10,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:10,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:10,471 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/ed4bbe3e868c433590e529a4343a50b5 2024-11-22T19:24:10,477 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/e3c192bd62fe460ba0f7c5384adfeff5 is 50, key is test_row_0/C:col10/1732303449646/Put/seqid=0 2024-11-22T19:24:10,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742411_1587 (size=12151) 2024-11-22T19:24:10,596 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:10,596 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-22T19:24:10,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:10,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:10,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:10,596 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:24:10,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:10,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:10,748 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:10,749 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-22T19:24:10,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:10,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:10,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:10,749 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:10,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:10,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:10,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:10,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303510801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:10,881 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/e3c192bd62fe460ba0f7c5384adfeff5 2024-11-22T19:24:10,885 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/1319214680cf45beae1d0a38f6ab07fe as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/1319214680cf45beae1d0a38f6ab07fe 2024-11-22T19:24:10,889 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/1319214680cf45beae1d0a38f6ab07fe, entries=150, sequenceid=210, filesize=11.9 K 2024-11-22T19:24:10,889 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/ed4bbe3e868c433590e529a4343a50b5 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/ed4bbe3e868c433590e529a4343a50b5 2024-11-22T19:24:10,892 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/ed4bbe3e868c433590e529a4343a50b5, entries=150, sequenceid=210, filesize=11.9 K 2024-11-22T19:24:10,896 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/e3c192bd62fe460ba0f7c5384adfeff5 as 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/e3c192bd62fe460ba0f7c5384adfeff5 2024-11-22T19:24:10,899 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/e3c192bd62fe460ba0f7c5384adfeff5, entries=150, sequenceid=210, filesize=11.9 K 2024-11-22T19:24:10,899 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 6827bb75511a85c0992cc4b6522bf5f1 in 1252ms, sequenceid=210, compaction requested=false 2024-11-22T19:24:10,899 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:10,901 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:10,901 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-22T19:24:10,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:10,902 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing 6827bb75511a85c0992cc4b6522bf5f1 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T19:24:10,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=A 2024-11-22T19:24:10,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:10,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=B 2024-11-22T19:24:10,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:10,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=C 2024-11-22T19:24:10,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:10,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/768a60926245441798f8f1b785414b92 is 50, key is test_row_0/A:col10/1732303449679/Put/seqid=0 2024-11-22T19:24:10,910 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742412_1588 (size=9757) 2024-11-22T19:24:10,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:24:10,918 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:10,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:10,959 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:10,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303510955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:10,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303510955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:10,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:10,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303510958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:11,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-22T19:24:11,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:11,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303511060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:11,062 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:11,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303511060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:11,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:11,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303511063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:11,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:11,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303511262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:11,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:11,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303511263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:11,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:11,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303511267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:11,310 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=224 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/768a60926245441798f8f1b785414b92 2024-11-22T19:24:11,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/3fd28fc23fb04194bbf87caf65dfcb46 is 50, key is test_row_0/B:col10/1732303449679/Put/seqid=0 2024-11-22T19:24:11,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742413_1589 (size=9757) 2024-11-22T19:24:11,571 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:11,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303511569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:11,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:11,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303511569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:11,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:11,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303511576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:11,721 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=224 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/3fd28fc23fb04194bbf87caf65dfcb46 2024-11-22T19:24:11,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/4ff5fc1f70134144abb13d372346f58a is 50, key is test_row_0/C:col10/1732303449679/Put/seqid=0 2024-11-22T19:24:11,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742414_1590 (size=9757) 2024-11-22T19:24:11,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:11,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303511813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:12,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:12,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303512074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:12,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:12,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303512076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:12,083 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:12,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303512080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:12,131 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=224 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/4ff5fc1f70134144abb13d372346f58a 2024-11-22T19:24:12,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/768a60926245441798f8f1b785414b92 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/768a60926245441798f8f1b785414b92 2024-11-22T19:24:12,139 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/768a60926245441798f8f1b785414b92, entries=100, sequenceid=224, filesize=9.5 K 2024-11-22T19:24:12,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/3fd28fc23fb04194bbf87caf65dfcb46 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/3fd28fc23fb04194bbf87caf65dfcb46 2024-11-22T19:24:12,144 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/3fd28fc23fb04194bbf87caf65dfcb46, entries=100, sequenceid=224, filesize=9.5 K 2024-11-22T19:24:12,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/4ff5fc1f70134144abb13d372346f58a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/4ff5fc1f70134144abb13d372346f58a 2024-11-22T19:24:12,147 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/4ff5fc1f70134144abb13d372346f58a, entries=100, sequenceid=224, filesize=9.5 K 2024-11-22T19:24:12,148 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 6827bb75511a85c0992cc4b6522bf5f1 in 1246ms, sequenceid=224, compaction requested=true 2024-11-22T19:24:12,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:12,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:12,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-11-22T19:24:12,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-11-22T19:24:12,150 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-22T19:24:12,150 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.2320 sec 2024-11-22T19:24:12,151 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 3.2340 sec 2024-11-22T19:24:13,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-22T19:24:13,021 INFO [Thread-2385 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-11-22T19:24:13,023 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:24:13,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-11-22T19:24:13,024 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
2024-11-22T19:24:13,025 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:24:13,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-22T19:24:13,025 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:24:13,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:24:13,087 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6827bb75511a85c0992cc4b6522bf5f1 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-22T19:24:13,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=A 2024-11-22T19:24:13,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:13,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=B 2024-11-22T19:24:13,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:13,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=C 2024-11-22T19:24:13,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:13,091 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/01bca872158f45e2973075e8800ff715 is 50, key is test_row_0/A:col10/1732303450957/Put/seqid=0 2024-11-22T19:24:13,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742415_1591 (size=14541) 2024-11-22T19:24:13,105 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:13,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303513098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:13,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:13,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303513105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:13,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:13,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303513106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:13,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-22T19:24:13,176 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:13,177 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-22T19:24:13,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:13,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:13,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:13,177 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:13,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:13,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:13,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:13,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303513206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:13,215 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:13,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303513211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:13,215 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:13,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303513211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:13,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-22T19:24:13,329 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:13,329 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-22T19:24:13,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:13,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:13,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:13,330 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:24:13,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:13,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:13,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:13,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303513411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:13,420 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:13,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303513416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:13,420 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:13,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303513416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:13,481 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:13,482 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-22T19:24:13,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
2024-11-22T19:24:13,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:13,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:13,482 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:13,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:24:13,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:24:13,495 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/01bca872158f45e2973075e8800ff715 2024-11-22T19:24:13,508 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/d36dfc16b900422d97324f65bb8ec32d is 50, key is test_row_0/B:col10/1732303450957/Put/seqid=0 2024-11-22T19:24:13,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742416_1592 (size=12151) 2024-11-22T19:24:13,512 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/d36dfc16b900422d97324f65bb8ec32d 2024-11-22T19:24:13,517 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/9ba866c092bb4a49bd215245827f9467 is 50, key is test_row_0/C:col10/1732303450957/Put/seqid=0 2024-11-22T19:24:13,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742417_1593 (size=12151) 2024-11-22T19:24:13,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-22T19:24:13,634 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:13,635 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-22T19:24:13,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:13,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:13,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:13,635 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:13,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:13,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:13,720 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:13,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303513716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:13,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:13,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303513721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:13,726 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:13,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303513723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:13,787 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:13,787 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-22T19:24:13,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:13,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:13,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:13,787 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:13,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:13,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:13,831 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:13,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303513828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:13,831 DEBUG [Thread-2375 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4148 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., hostname=a307a1377457,35917,1732303314657, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:24:13,921 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/9ba866c092bb4a49bd215245827f9467 2024-11-22T19:24:13,925 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/01bca872158f45e2973075e8800ff715 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/01bca872158f45e2973075e8800ff715 2024-11-22T19:24:13,928 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/01bca872158f45e2973075e8800ff715, entries=200, sequenceid=250, filesize=14.2 K 2024-11-22T19:24:13,929 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/d36dfc16b900422d97324f65bb8ec32d as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/d36dfc16b900422d97324f65bb8ec32d 2024-11-22T19:24:13,932 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/d36dfc16b900422d97324f65bb8ec32d, entries=150, sequenceid=250, filesize=11.9 K 2024-11-22T19:24:13,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/9ba866c092bb4a49bd215245827f9467 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/9ba866c092bb4a49bd215245827f9467 2024-11-22T19:24:13,936 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/9ba866c092bb4a49bd215245827f9467, entries=150, sequenceid=250, filesize=11.9 K 2024-11-22T19:24:13,936 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 6827bb75511a85c0992cc4b6522bf5f1 in 850ms, sequenceid=250, compaction requested=true 2024-11-22T19:24:13,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:13,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:24:13,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:13,937 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:24:13,937 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:24:13,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:24:13,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:13,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:24:13,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:13,938 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46654 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:24:13,938 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49044 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:24:13,938 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 6827bb75511a85c0992cc4b6522bf5f1/B is initiating minor compaction (all files) 2024-11-22T19:24:13,938 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/B in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
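The flush above leaves four eligible store files in each of A, B and C, which is what trips the minor-compaction selection (SortedCompactionPolicy finds 4 eligible files, ExploringCompactionPolicy confirms the whole set). Below is a minimal sketch, not taken from the test source, of the two client-side levers that interact with that behaviour: raising the per-family minimum file count before minor compactions kick in, and forcing a compaction by hand. It assumes the standard 2.x Admin API; the table and family names are simply the ones appearing in this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionTuningSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");

      // Start from the existing descriptor for family 'A' so its other
      // attributes are preserved, then raise the minor-compaction trigger
      // above the four files the flush just produced (default minimum is 3).
      admin.modifyColumnFamily(table,
          ColumnFamilyDescriptorBuilder
              .newBuilder(admin.getDescriptor(table).getColumnFamily(Bytes.toBytes("A")))
              .setConfiguration("hbase.hstore.compaction.min", "6")
              .build());

      // Or go the other way and ask for all store files to be merged at once.
      admin.majorCompact(table);
    }
  }
}
```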
2024-11-22T19:24:13,938 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 6827bb75511a85c0992cc4b6522bf5f1/A is initiating minor compaction (all files) 2024-11-22T19:24:13,938 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/A in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:13,938 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/6f8221221d5b4edea648e4c69837ed85, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/ed4bbe3e868c433590e529a4343a50b5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/3fd28fc23fb04194bbf87caf65dfcb46, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/d36dfc16b900422d97324f65bb8ec32d] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=45.6 K 2024-11-22T19:24:13,938 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/fe322ee00e4f45ec87895e2d85b45de0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/1319214680cf45beae1d0a38f6ab07fe, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/768a60926245441798f8f1b785414b92, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/01bca872158f45e2973075e8800ff715] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=47.9 K 2024-11-22T19:24:13,938 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f8221221d5b4edea648e4c69837ed85, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1732303448751 2024-11-22T19:24:13,939 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe322ee00e4f45ec87895e2d85b45de0, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1732303448751 2024-11-22T19:24:13,939 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting ed4bbe3e868c433590e529a4343a50b5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732303448783 2024-11-22T19:24:13,939 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1319214680cf45beae1d0a38f6ab07fe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, 
earliestPutTs=1732303448783 2024-11-22T19:24:13,939 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 768a60926245441798f8f1b785414b92, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1732303449665 2024-11-22T19:24:13,939 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 3fd28fc23fb04194bbf87caf65dfcb46, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1732303449665 2024-11-22T19:24:13,939 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:13,939 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting d36dfc16b900422d97324f65bb8ec32d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732303450952 2024-11-22T19:24:13,939 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01bca872158f45e2973075e8800ff715, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732303450952 2024-11-22T19:24:13,940 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-22T19:24:13,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:13,940 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing 6827bb75511a85c0992cc4b6522bf5f1 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T19:24:13,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=A 2024-11-22T19:24:13,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:13,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=B 2024-11-22T19:24:13,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:13,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=C 2024-11-22T19:24:13,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:13,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/2a60825253454373830990d9ca130988 is 50, key is test_row_0/A:col10/1732303453096/Put/seqid=0 2024-11-22T19:24:13,959 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#B#compaction#510 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:13,959 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/a2efefaaeb854e29aa60a2999fb91c43 is 50, key is test_row_0/B:col10/1732303450957/Put/seqid=0 2024-11-22T19:24:13,962 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#A#compaction#511 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:13,963 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/af649385fa3245ba9db7eff539dd5ee6 is 50, key is test_row_0/A:col10/1732303450957/Put/seqid=0 2024-11-22T19:24:13,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742418_1594 (size=12301) 2024-11-22T19:24:13,971 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/2a60825253454373830990d9ca130988 2024-11-22T19:24:13,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742419_1595 (size=12731) 2024-11-22T19:24:13,982 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/a2efefaaeb854e29aa60a2999fb91c43 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/a2efefaaeb854e29aa60a2999fb91c43 2024-11-22T19:24:13,988 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/B of 6827bb75511a85c0992cc4b6522bf5f1 into a2efefaaeb854e29aa60a2999fb91c43(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
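The PressureAwareThroughputController lines above ("average throughput is 3.28 MB/second ... total limit is 50.00 MB/second") show the compactions running under a rate limit. The sketch below only names the server-side knobs involved; it is an assumption based on the pressure-aware controller in 2.x branches (key names worth verifying against this exact build), and these values normally belong in hbase-site.xml on the region servers rather than in client code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // The "total limit is 50.00 MB/second" above is the controller's current
    // bound; the pressure-aware controller moves between these two values
    // (bytes per second) depending on store-file pressure.
    conf.set("hbase.hstore.compaction.throughput.lower.bound", String.valueOf(50L * 1024 * 1024));
    conf.set("hbase.hstore.compaction.throughput.higher.bound", String.valueOf(100L * 1024 * 1024));

    // To remove the cap entirely, swap in the no-limit controller.
    conf.set("hbase.regionserver.throughput.controller",
        "org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController");
  }
}
```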
2024-11-22T19:24:13,988 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:13,988 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/B, priority=12, startTime=1732303453937; duration=0sec 2024-11-22T19:24:13,988 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:13,988 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:B 2024-11-22T19:24:13,988 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:24:13,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/fa0932f539574e9daf00c00cedba0122 is 50, key is test_row_0/B:col10/1732303453096/Put/seqid=0 2024-11-22T19:24:13,992 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46654 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:24:13,992 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 6827bb75511a85c0992cc4b6522bf5f1/C is initiating minor compaction (all files) 2024-11-22T19:24:13,992 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/C in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
2024-11-22T19:24:13,992 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/52bc6f8b327240dfb167f52c2accb316, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/e3c192bd62fe460ba0f7c5384adfeff5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/4ff5fc1f70134144abb13d372346f58a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/9ba866c092bb4a49bd215245827f9467] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=45.6 K 2024-11-22T19:24:13,992 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 52bc6f8b327240dfb167f52c2accb316, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1732303448751 2024-11-22T19:24:13,993 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting e3c192bd62fe460ba0f7c5384adfeff5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732303448783 2024-11-22T19:24:13,993 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ff5fc1f70134144abb13d372346f58a, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1732303449665 2024-11-22T19:24:13,994 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ba866c092bb4a49bd215245827f9467, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732303450952 2024-11-22T19:24:13,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742420_1596 (size=12731) 2024-11-22T19:24:14,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742421_1597 (size=12301) 2024-11-22T19:24:14,001 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/fa0932f539574e9daf00c00cedba0122 2024-11-22T19:24:14,003 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#C#compaction#513 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:14,004 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/32002256e662409f855b1d9095abe482 is 50, key is test_row_0/C:col10/1732303450957/Put/seqid=0 2024-11-22T19:24:14,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742422_1598 (size=12731) 2024-11-22T19:24:14,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/d9b6f0feeea74ab384c1219149caed8e is 50, key is test_row_0/C:col10/1732303453096/Put/seqid=0 2024-11-22T19:24:14,013 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/32002256e662409f855b1d9095abe482 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/32002256e662409f855b1d9095abe482 2024-11-22T19:24:14,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742423_1599 (size=12301) 2024-11-22T19:24:14,014 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/d9b6f0feeea74ab384c1219149caed8e 2024-11-22T19:24:14,018 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/C of 6827bb75511a85c0992cc4b6522bf5f1 into 32002256e662409f855b1d9095abe482(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
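After each of these compactions the B and C stores collapse back to a single ~12.4 K file ("total size for store is 12.4 K"). A quick way to confirm that from a client, sketched below on the assumption that the 2.x RegionMetrics API is available in this build, is to walk the region metrics for the table and print store-file counts and sizes.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Size;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreFileCountSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Iterate every region server and report per-region store state.
      for (ServerName sn : admin.getClusterMetrics().getServersName()) {
        for (RegionMetrics rm : admin.getRegionMetrics(sn, table)) {
          System.out.printf("%s: %d store files, %.1f KB on disk, %.1f KB in memstore%n",
              Bytes.toStringBinary(rm.getRegionName()),
              rm.getStoreFileCount(),
              rm.getStoreFileSize().get(Size.Unit.KILOBYTE),
              rm.getMemStoreSize().get(Size.Unit.KILOBYTE));
        }
      }
    }
  }
}
```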
2024-11-22T19:24:14,018 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:14,018 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/C, priority=12, startTime=1732303453937; duration=0sec 2024-11-22T19:24:14,018 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:14,018 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:C 2024-11-22T19:24:14,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/2a60825253454373830990d9ca130988 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/2a60825253454373830990d9ca130988 2024-11-22T19:24:14,022 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/2a60825253454373830990d9ca130988, entries=150, sequenceid=261, filesize=12.0 K 2024-11-22T19:24:14,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/fa0932f539574e9daf00c00cedba0122 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/fa0932f539574e9daf00c00cedba0122 2024-11-22T19:24:14,027 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/fa0932f539574e9daf00c00cedba0122, entries=150, sequenceid=261, filesize=12.0 K 2024-11-22T19:24:14,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/d9b6f0feeea74ab384c1219149caed8e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/d9b6f0feeea74ab384c1219149caed8e 2024-11-22T19:24:14,031 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/d9b6f0feeea74ab384c1219149caed8e, entries=150, sequenceid=261, filesize=12.0 K 2024-11-22T19:24:14,032 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for 6827bb75511a85c0992cc4b6522bf5f1 in 92ms, sequenceid=261, compaction requested=false 2024-11-22T19:24:14,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:14,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:14,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-11-22T19:24:14,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-11-22T19:24:14,036 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-11-22T19:24:14,036 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0080 sec 2024-11-22T19:24:14,038 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 1.0140 sec 2024-11-22T19:24:14,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-22T19:24:14,128 INFO [Thread-2385 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-11-22T19:24:14,129 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:24:14,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees 2024-11-22T19:24:14,131 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:24:14,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-22T19:24:14,131 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:24:14,131 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, 
ppid=146, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:24:14,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-22T19:24:14,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:24:14,239 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6827bb75511a85c0992cc4b6522bf5f1 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T19:24:14,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=A 2024-11-22T19:24:14,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:14,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=B 2024-11-22T19:24:14,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:14,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=C 2024-11-22T19:24:14,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:14,243 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/1b8adb5daa794beeb58178b464bdc222 is 50, key is test_row_0/A:col10/1732303454234/Put/seqid=0 2024-11-22T19:24:14,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742424_1600 (size=19621) 2024-11-22T19:24:14,283 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:14,283 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-22T19:24:14,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:14,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:14,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
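Procedure 144 has just finished and the client immediately requests another flush, which the master stores as FlushTableProcedure pid=146 with a FlushRegionProcedure subprocedure (pid=147) dispatched to the region server. From the client side that whole sequence is driven by one call; a minimal sketch (assumed client code, not the test itself) is below. The "NOT flushing ... as already flushing" / "Unable to complete flush" pair that follows simply means the region was still busy with the previous flush, and the master retries the subprocedure until it succeeds, as the log shows it did earlier for pid=145.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Surfaces master-side as a FlushTableProcedure with one
      // FlushRegionProcedure per region; returns once the flush completes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```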
2024-11-22T19:24:14,283 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:14,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:14,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:14,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:14,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303514304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:14,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:14,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303514307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:14,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:14,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303514312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:14,399 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/af649385fa3245ba9db7eff539dd5ee6 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/af649385fa3245ba9db7eff539dd5ee6 2024-11-22T19:24:14,403 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/A of 6827bb75511a85c0992cc4b6522bf5f1 into af649385fa3245ba9db7eff539dd5ee6(size=12.4 K), total size for store is 24.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:14,403 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:14,403 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/A, priority=12, startTime=1732303453937; duration=0sec 2024-11-22T19:24:14,403 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:14,403 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:A 2024-11-22T19:24:14,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:14,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303514413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:14,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:14,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303514413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:14,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:14,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303514414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:14,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-22T19:24:14,435 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:14,435 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-22T19:24:14,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:14,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:14,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:14,436 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:24:14,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:14,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:14,587 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:14,588 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-22T19:24:14,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:14,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:14,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:14,588 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:14,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:14,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:14,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:14,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303514619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:14,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:14,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303514619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:14,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:14,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303514619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:14,648 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/1b8adb5daa794beeb58178b464bdc222 2024-11-22T19:24:14,654 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/2e11507849f845ff9a93e6ce666dcbe0 is 50, key is test_row_0/B:col10/1732303454234/Put/seqid=0 2024-11-22T19:24:14,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742425_1601 (size=12301) 2024-11-22T19:24:14,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-22T19:24:14,740 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:14,740 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-22T19:24:14,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:14,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
as already flushing 2024-11-22T19:24:14,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:14,741 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:14,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:14,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:14,893 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:14,893 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-22T19:24:14,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:14,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:14,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:14,893 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:14,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:14,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:14,926 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:14,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303514922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:14,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:14,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303514923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:14,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:14,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303514923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:15,045 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:15,046 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-22T19:24:15,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:15,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:15,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:15,046 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:15,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:15,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:15,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/2e11507849f845ff9a93e6ce666dcbe0 2024-11-22T19:24:15,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/102613682949456887c9c486fa494fb5 is 50, key is test_row_0/C:col10/1732303454234/Put/seqid=0 2024-11-22T19:24:15,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742426_1602 (size=12301) 2024-11-22T19:24:15,070 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/102613682949456887c9c486fa494fb5 2024-11-22T19:24:15,074 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/1b8adb5daa794beeb58178b464bdc222 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/1b8adb5daa794beeb58178b464bdc222 2024-11-22T19:24:15,077 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/1b8adb5daa794beeb58178b464bdc222, entries=300, sequenceid=275, filesize=19.2 K 2024-11-22T19:24:15,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/2e11507849f845ff9a93e6ce666dcbe0 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/2e11507849f845ff9a93e6ce666dcbe0 2024-11-22T19:24:15,082 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/2e11507849f845ff9a93e6ce666dcbe0, entries=150, sequenceid=275, filesize=12.0 K 
2024-11-22T19:24:15,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/102613682949456887c9c486fa494fb5 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/102613682949456887c9c486fa494fb5 2024-11-22T19:24:15,086 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/102613682949456887c9c486fa494fb5, entries=150, sequenceid=275, filesize=12.0 K 2024-11-22T19:24:15,087 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 6827bb75511a85c0992cc4b6522bf5f1 in 848ms, sequenceid=275, compaction requested=true 2024-11-22T19:24:15,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:15,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:24:15,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:15,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:24:15,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:15,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:24:15,087 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:15,087 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:15,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:15,088 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:15,088 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 44653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:15,088 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 
6827bb75511a85c0992cc4b6522bf5f1/B is initiating minor compaction (all files) 2024-11-22T19:24:15,088 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 6827bb75511a85c0992cc4b6522bf5f1/A is initiating minor compaction (all files) 2024-11-22T19:24:15,088 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/B in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:15,088 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/A in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:15,088 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/a2efefaaeb854e29aa60a2999fb91c43, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/fa0932f539574e9daf00c00cedba0122, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/2e11507849f845ff9a93e6ce666dcbe0] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=36.5 K 2024-11-22T19:24:15,088 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/af649385fa3245ba9db7eff539dd5ee6, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/2a60825253454373830990d9ca130988, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/1b8adb5daa794beeb58178b464bdc222] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=43.6 K 2024-11-22T19:24:15,088 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting a2efefaaeb854e29aa60a2999fb91c43, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732303450952 2024-11-22T19:24:15,089 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting af649385fa3245ba9db7eff539dd5ee6, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732303450952 2024-11-22T19:24:15,089 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting fa0932f539574e9daf00c00cedba0122, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1732303453096 2024-11-22T19:24:15,089 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a60825253454373830990d9ca130988, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1732303453096 2024-11-22T19:24:15,089 DEBUG 
[RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b8adb5daa794beeb58178b464bdc222, keycount=300, bloomtype=ROW, size=19.2 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732303454227 2024-11-22T19:24:15,089 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e11507849f845ff9a93e6ce666dcbe0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732303454234 2024-11-22T19:24:15,097 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#B#compaction#518 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:15,097 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/a2430175e7da47edb2772e99800d0622 is 50, key is test_row_0/B:col10/1732303454234/Put/seqid=0 2024-11-22T19:24:15,098 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#A#compaction#519 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:15,099 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/2397b3086d40406f88833b813483469c is 50, key is test_row_0/A:col10/1732303454234/Put/seqid=0 2024-11-22T19:24:15,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742427_1603 (size=12983) 2024-11-22T19:24:15,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742428_1604 (size=12983) 2024-11-22T19:24:15,110 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/2397b3086d40406f88833b813483469c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/2397b3086d40406f88833b813483469c 2024-11-22T19:24:15,114 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/A of 6827bb75511a85c0992cc4b6522bf5f1 into 2397b3086d40406f88833b813483469c(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:24:15,114 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:15,114 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/A, priority=13, startTime=1732303455087; duration=0sec 2024-11-22T19:24:15,114 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:15,114 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:A 2024-11-22T19:24:15,114 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:15,115 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:15,115 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 6827bb75511a85c0992cc4b6522bf5f1/C is initiating minor compaction (all files) 2024-11-22T19:24:15,115 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/C in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:15,115 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/32002256e662409f855b1d9095abe482, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/d9b6f0feeea74ab384c1219149caed8e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/102613682949456887c9c486fa494fb5] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=36.5 K 2024-11-22T19:24:15,115 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32002256e662409f855b1d9095abe482, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732303450952 2024-11-22T19:24:15,116 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9b6f0feeea74ab384c1219149caed8e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1732303453096 2024-11-22T19:24:15,116 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 102613682949456887c9c486fa494fb5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732303454234 2024-11-22T19:24:15,122 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#C#compaction#520 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:15,122 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/5621cfb597c34aeea70daa098b85e984 is 50, key is test_row_0/C:col10/1732303454234/Put/seqid=0 2024-11-22T19:24:15,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742429_1605 (size=12983) 2024-11-22T19:24:15,198 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:15,198 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-22T19:24:15,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:15,199 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2837): Flushing 6827bb75511a85c0992cc4b6522bf5f1 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-22T19:24:15,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=A 2024-11-22T19:24:15,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:15,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=B 2024-11-22T19:24:15,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:15,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=C 2024-11-22T19:24:15,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:15,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/b98dcca1b2034bb78f8d882cb38afb8c is 50, key is test_row_0/A:col10/1732303454311/Put/seqid=0 2024-11-22T19:24:15,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to 
blk_1073742430_1606 (size=12301) 2024-11-22T19:24:15,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-22T19:24:15,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:24:15,432 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. as already flushing 2024-11-22T19:24:15,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:15,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:15,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303515474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:15,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303515474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:15,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:15,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303515474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:15,510 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/a2430175e7da47edb2772e99800d0622 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/a2430175e7da47edb2772e99800d0622 2024-11-22T19:24:15,513 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/B of 6827bb75511a85c0992cc4b6522bf5f1 into a2430175e7da47edb2772e99800d0622(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:15,513 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:15,513 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/B, priority=13, startTime=1732303455087; duration=0sec 2024-11-22T19:24:15,513 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:15,513 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:B 2024-11-22T19:24:15,530 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/5621cfb597c34aeea70daa098b85e984 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/5621cfb597c34aeea70daa098b85e984 2024-11-22T19:24:15,534 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/C of 6827bb75511a85c0992cc4b6522bf5f1 into 5621cfb597c34aeea70daa098b85e984(size=12.7 K), total size for store is 12.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:15,534 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:15,534 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/C, priority=13, startTime=1732303455087; duration=0sec 2024-11-22T19:24:15,534 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:15,534 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:C 2024-11-22T19:24:15,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:15,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303515584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:15,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:15,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303515584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:15,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:15,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303515584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:15,611 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/b98dcca1b2034bb78f8d882cb38afb8c 2024-11-22T19:24:15,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/df0f6c9978a34e2e844e61d63fa04831 is 50, key is test_row_0/B:col10/1732303454311/Put/seqid=0 2024-11-22T19:24:15,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742431_1607 (size=12301) 2024-11-22T19:24:15,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:15,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303515788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:15,792 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:15,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303515788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:15,792 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:15,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303515788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:16,023 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/df0f6c9978a34e2e844e61d63fa04831 2024-11-22T19:24:16,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/13afa3f9c3eb474f9b06916b7ef9ca35 is 50, key is test_row_0/C:col10/1732303454311/Put/seqid=0 2024-11-22T19:24:16,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742432_1608 (size=12301) 2024-11-22T19:24:16,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:16,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303516093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:16,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:16,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303516094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:16,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:16,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303516095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:16,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-22T19:24:16,435 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/13afa3f9c3eb474f9b06916b7ef9ca35 2024-11-22T19:24:16,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/b98dcca1b2034bb78f8d882cb38afb8c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/b98dcca1b2034bb78f8d882cb38afb8c 2024-11-22T19:24:16,442 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/b98dcca1b2034bb78f8d882cb38afb8c, entries=150, sequenceid=301, filesize=12.0 K 2024-11-22T19:24:16,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/df0f6c9978a34e2e844e61d63fa04831 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/df0f6c9978a34e2e844e61d63fa04831 2024-11-22T19:24:16,445 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/df0f6c9978a34e2e844e61d63fa04831, entries=150, sequenceid=301, filesize=12.0 K 2024-11-22T19:24:16,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/13afa3f9c3eb474f9b06916b7ef9ca35 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/13afa3f9c3eb474f9b06916b7ef9ca35 2024-11-22T19:24:16,449 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/13afa3f9c3eb474f9b06916b7ef9ca35, entries=150, sequenceid=301, filesize=12.0 K 2024-11-22T19:24:16,449 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 6827bb75511a85c0992cc4b6522bf5f1 in 1250ms, sequenceid=301, compaction requested=false 2024-11-22T19:24:16,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2538): Flush status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:16,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
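The flush above completes at sequenceid=301 after repeated RegionTooBusyException pushback ("Over memstore limit=512.0 K") on the put handlers. A minimal client-side sketch, assuming an HBase 2.x client and illustrative retry values (the table, row, and column names mirror the test, but the helper class itself is hypothetical), of a put that tolerates this kind of pushback:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PushbackTolerantWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values; the client-side retrying caller is driven by the
    // same settings.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100L);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        // The client retries RegionTooBusyException internally with backoff.
        table.put(put);
      } catch (IOException retriesExhausted) {
        // Retries ran out while the region was still over its memstore limit
        // (the exact wrapper exception depends on the retry path); back off
        // at the application level and try once more.
        Thread.sleep(1_000L);
        table.put(put);
      }
    }
  }
}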
2024-11-22T19:24:16,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=147 2024-11-22T19:24:16,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=147 2024-11-22T19:24:16,452 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-22T19:24:16,452 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3200 sec 2024-11-22T19:24:16,453 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees in 2.3230 sec 2024-11-22T19:24:16,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:24:16,602 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6827bb75511a85c0992cc4b6522bf5f1 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-22T19:24:16,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=A 2024-11-22T19:24:16,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:16,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=B 2024-11-22T19:24:16,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:16,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=C 2024-11-22T19:24:16,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:16,608 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/d97089ad798c40c29d135c6f9d75b9b7 is 50, key is test_row_0/A:col10/1732303455441/Put/seqid=0 2024-11-22T19:24:16,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742433_1609 (size=14741) 2024-11-22T19:24:16,634 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/d97089ad798c40c29d135c6f9d75b9b7 2024-11-22T19:24:16,641 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/ae3c101e54544909a81f8a3c30998bd3 is 50, key is test_row_0/B:col10/1732303455441/Put/seqid=0 2024-11-22T19:24:16,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to 
blk_1073742434_1610 (size=12301) 2024-11-22T19:24:16,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:16,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303516639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:16,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:16,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303516640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:16,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:16,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303516644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:16,752 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:16,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303516747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:16,752 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:16,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303516747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:16,755 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:16,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303516751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:16,797 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:16,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41004 deadline: 1732303516794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:16,798 DEBUG [Thread-2379 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18193 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., hostname=a307a1377457,35917,1732303314657, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:24:16,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:16,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303516953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:16,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:16,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303516954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:16,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:16,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303516957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:17,045 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/ae3c101e54544909a81f8a3c30998bd3 2024-11-22T19:24:17,052 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/0395400e56a749e182a4193189a5ef6d is 50, key is test_row_0/C:col10/1732303455441/Put/seqid=0 2024-11-22T19:24:17,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742435_1611 (size=12301) 2024-11-22T19:24:17,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:17,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303517258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:17,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:17,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303517259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:17,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:17,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303517263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:17,456 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/0395400e56a749e182a4193189a5ef6d 2024-11-22T19:24:17,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/d97089ad798c40c29d135c6f9d75b9b7 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/d97089ad798c40c29d135c6f9d75b9b7 2024-11-22T19:24:17,463 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/d97089ad798c40c29d135c6f9d75b9b7, entries=200, sequenceid=318, filesize=14.4 K 2024-11-22T19:24:17,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/ae3c101e54544909a81f8a3c30998bd3 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/ae3c101e54544909a81f8a3c30998bd3 2024-11-22T19:24:17,467 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/ae3c101e54544909a81f8a3c30998bd3, entries=150, sequenceid=318, filesize=12.0 K 2024-11-22T19:24:17,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/0395400e56a749e182a4193189a5ef6d as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/0395400e56a749e182a4193189a5ef6d 2024-11-22T19:24:17,470 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/0395400e56a749e182a4193189a5ef6d, entries=150, sequenceid=318, filesize=12.0 K 2024-11-22T19:24:17,471 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 6827bb75511a85c0992cc4b6522bf5f1 in 869ms, sequenceid=318, compaction requested=true 2024-11-22T19:24:17,471 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:17,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:24:17,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:17,471 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:17,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:24:17,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:17,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:24:17,471 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:17,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:17,472 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:17,472 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40025 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:17,472 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 6827bb75511a85c0992cc4b6522bf5f1/B is initiating minor compaction (all files) 2024-11-22T19:24:17,472 DEBUG 
[RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 6827bb75511a85c0992cc4b6522bf5f1/A is initiating minor compaction (all files) 2024-11-22T19:24:17,472 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/A in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:17,472 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/B in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:17,472 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/a2430175e7da47edb2772e99800d0622, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/df0f6c9978a34e2e844e61d63fa04831, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/ae3c101e54544909a81f8a3c30998bd3] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=36.7 K 2024-11-22T19:24:17,472 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/2397b3086d40406f88833b813483469c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/b98dcca1b2034bb78f8d882cb38afb8c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/d97089ad798c40c29d135c6f9d75b9b7] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=39.1 K 2024-11-22T19:24:17,473 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting a2430175e7da47edb2772e99800d0622, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732303454234 2024-11-22T19:24:17,473 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2397b3086d40406f88833b813483469c, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732303454234 2024-11-22T19:24:17,473 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b98dcca1b2034bb78f8d882cb38afb8c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1732303454295 2024-11-22T19:24:17,473 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting df0f6c9978a34e2e844e61d63fa04831, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1732303454295 2024-11-22T19:24:17,473 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
d97089ad798c40c29d135c6f9d75b9b7, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732303455438 2024-11-22T19:24:17,473 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting ae3c101e54544909a81f8a3c30998bd3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732303455441 2024-11-22T19:24:17,478 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#B#compaction#527 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:17,478 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#A#compaction#528 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:17,479 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/c5ccccb8879245dba36122d5be7622a2 is 50, key is test_row_0/B:col10/1732303455441/Put/seqid=0 2024-11-22T19:24:17,479 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/a7aa3252f63b4f23ba8ea49c18fcc586 is 50, key is test_row_0/A:col10/1732303455441/Put/seqid=0 2024-11-22T19:24:17,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742436_1612 (size=13085) 2024-11-22T19:24:17,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742437_1613 (size=13085) 2024-11-22T19:24:17,486 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/c5ccccb8879245dba36122d5be7622a2 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/c5ccccb8879245dba36122d5be7622a2 2024-11-22T19:24:17,486 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/a7aa3252f63b4f23ba8ea49c18fcc586 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/a7aa3252f63b4f23ba8ea49c18fcc586 2024-11-22T19:24:17,491 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/A of 6827bb75511a85c0992cc4b6522bf5f1 into a7aa3252f63b4f23ba8ea49c18fcc586(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
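[Illustrative aside, not part of the captured log.] The repeated RegionTooBusyException entries in this stretch come from HRegion.checkResources rejecting Mutate calls once the region's memstore passes its 512.0 K blocking limit. As a hedged sketch of where that number normally comes from: in stock HBase the blocking threshold is the per-region flush size multiplied by the block multiplier, configured by the real keys hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier. The concrete values below are placeholders chosen only to reproduce the same 512 K figure; the log does not show the settings this test actually used.

// Minimal sketch, assuming placeholder values (128 KB x 4 = 512 KB); not the test's real config.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region flush trigger (placeholder value: 128 KB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Writes are rejected with RegionTooBusyException once the memstore reaches
    // flush.size * block.multiplier (here 128 KB * 4 = 512 KB, matching the log's limit).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking memstore limit (bytes): " + blockingLimit);
  }
}
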
2024-11-22T19:24:17,491 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/B of 6827bb75511a85c0992cc4b6522bf5f1 into c5ccccb8879245dba36122d5be7622a2(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:17,492 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:17,492 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:17,492 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/B, priority=13, startTime=1732303457471; duration=0sec 2024-11-22T19:24:17,492 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/A, priority=13, startTime=1732303457471; duration=0sec 2024-11-22T19:24:17,492 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:17,492 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:B 2024-11-22T19:24:17,492 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:17,492 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:A 2024-11-22T19:24:17,492 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:17,492 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:17,492 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 6827bb75511a85c0992cc4b6522bf5f1/C is initiating minor compaction (all files) 2024-11-22T19:24:17,492 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/C in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
2024-11-22T19:24:17,493 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/5621cfb597c34aeea70daa098b85e984, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/13afa3f9c3eb474f9b06916b7ef9ca35, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/0395400e56a749e182a4193189a5ef6d] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=36.7 K 2024-11-22T19:24:17,493 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 5621cfb597c34aeea70daa098b85e984, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732303454234 2024-11-22T19:24:17,493 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 13afa3f9c3eb474f9b06916b7ef9ca35, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1732303454295 2024-11-22T19:24:17,493 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 0395400e56a749e182a4193189a5ef6d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732303455441 2024-11-22T19:24:17,499 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#C#compaction#529 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:17,500 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/7c5dbe0e6d7245c5b0fd687888092d07 is 50, key is test_row_0/C:col10/1732303455441/Put/seqid=0 2024-11-22T19:24:17,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742438_1614 (size=13085) 2024-11-22T19:24:17,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:24:17,768 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6827bb75511a85c0992cc4b6522bf5f1 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-22T19:24:17,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=A 2024-11-22T19:24:17,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:17,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=B 2024-11-22T19:24:17,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:17,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=C 2024-11-22T19:24:17,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:17,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/8363e1f0644e45ae8dace44536e5860d is 50, key is test_row_0/A:col10/1732303456639/Put/seqid=0 2024-11-22T19:24:17,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742439_1615 (size=14741) 2024-11-22T19:24:17,776 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/8363e1f0644e45ae8dace44536e5860d 2024-11-22T19:24:17,782 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/961152be29ba48139e63531299a5dd1a is 50, key is test_row_0/B:col10/1732303456639/Put/seqid=0 2024-11-22T19:24:17,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742440_1616 (size=12301) 2024-11-22T19:24:17,786 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=342 (bloomFilter=true), 
to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/961152be29ba48139e63531299a5dd1a 2024-11-22T19:24:17,786 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:17,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303517784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:17,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:17,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303517785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:17,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:17,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303517786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:17,792 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/a45f27b566fe4da6889c34146e31395b is 50, key is test_row_0/C:col10/1732303456639/Put/seqid=0 2024-11-22T19:24:17,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742441_1617 (size=12301) 2024-11-22T19:24:17,863 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:17,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40982 deadline: 1732303517861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:17,864 DEBUG [Thread-2375 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8180 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., hostname=a307a1377457,35917,1732303314657, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:24:17,888 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:17,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303517887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:17,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:17,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303517891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:17,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:17,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303517891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:17,907 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/7c5dbe0e6d7245c5b0fd687888092d07 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/7c5dbe0e6d7245c5b0fd687888092d07 2024-11-22T19:24:17,910 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/C of 6827bb75511a85c0992cc4b6522bf5f1 into 7c5dbe0e6d7245c5b0fd687888092d07(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
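The repeated RegionTooBusyException entries above come from HRegion.checkResources: once the region's memstore grows past its blocking size (the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier, here a deliberately small 512.0 K for this test), new puts are rejected and the client retries them. Below is a minimal client-side sketch of a writer hitting that limit; the table, row and family names match the test, but the retry settings, qualifier and value are illustrative assumptions, not taken from this run.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Client-side retry knobs (values are illustrative): the RpcRetryingCallerImpl
    // frame in the trace above keeps re-sending the put between pauses until the
    // region accepts it or the retry budget runs out.
    conf.setInt("hbase.client.retries.number", 15);
    conf.setInt("hbase.client.pause", 100); // milliseconds between attempts
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some-value"));
      try {
        table.put(put);
      } catch (IOException e) {
        // If the memstore stays over its blocking limit for the whole retry budget,
        // the failure surfaces here (possibly wrapped in a retries-exhausted
        // exception); a long-running writer would back off and try again later.
        Thread.sleep(1000L);
      }
    }
  }
}

The client-side trace at the top of this section shows exactly that loop in this run: HTable.put goes through RpcRetryingCallerImpl.callWithRetries and receives the RegionTooBusyException back wrapped in a RemoteWithExtrasException.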
2024-11-22T19:24:17,910 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:17,910 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/C, priority=13, startTime=1732303457471; duration=0sec 2024-11-22T19:24:17,910 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:17,910 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:C 2024-11-22T19:24:18,092 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:18,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303518090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:18,101 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:18,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303518097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:18,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:18,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303518098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:18,195 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/a45f27b566fe4da6889c34146e31395b 2024-11-22T19:24:18,199 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/8363e1f0644e45ae8dace44536e5860d as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/8363e1f0644e45ae8dace44536e5860d 2024-11-22T19:24:18,202 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/8363e1f0644e45ae8dace44536e5860d, entries=200, sequenceid=342, filesize=14.4 K 2024-11-22T19:24:18,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/961152be29ba48139e63531299a5dd1a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/961152be29ba48139e63531299a5dd1a 2024-11-22T19:24:18,206 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/961152be29ba48139e63531299a5dd1a, entries=150, sequenceid=342, filesize=12.0 K 2024-11-22T19:24:18,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/a45f27b566fe4da6889c34146e31395b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/a45f27b566fe4da6889c34146e31395b 2024-11-22T19:24:18,209 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/a45f27b566fe4da6889c34146e31395b, entries=150, sequenceid=342, filesize=12.0 K 2024-11-22T19:24:18,210 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 6827bb75511a85c0992cc4b6522bf5f1 in 443ms, sequenceid=342, compaction requested=false 2024-11-22T19:24:18,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:18,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-22T19:24:18,236 INFO [Thread-2385 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-11-22T19:24:18,237 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:24:18,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees 2024-11-22T19:24:18,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-22T19:24:18,239 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:24:18,239 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:24:18,240 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:24:18,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-22T19:24:18,391 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:18,391 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-22T19:24:18,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
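The pid=148 chain above (a FlushTableProcedure stored on the master, then a FlushRegionProcedure dispatched to the region server as a FlushRegionCallable) is what an explicit table flush requested by the client looks like; the test asks for one between write batches. A minimal sketch of issuing that request, assuming an already reachable cluster:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master runs a
      // FlushTableProcedure (the pid=148 entry above) that fans out one
      // FlushRegionProcedure per region, while the client polls until it is done
      // (the repeated "Checking to see if procedure is done" lines).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}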
2024-11-22T19:24:18,392 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2837): Flushing 6827bb75511a85c0992cc4b6522bf5f1 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-22T19:24:18,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=A 2024-11-22T19:24:18,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:18,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=B 2024-11-22T19:24:18,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:18,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=C 2024-11-22T19:24:18,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:18,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/ba6f97d2e0e042b2a5bd739bd62cbbb0 is 50, key is test_row_0/A:col10/1732303457784/Put/seqid=0 2024-11-22T19:24:18,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:24:18,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
as already flushing 2024-11-22T19:24:18,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742442_1618 (size=12301) 2024-11-22T19:24:18,401 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/ba6f97d2e0e042b2a5bd739bd62cbbb0 2024-11-22T19:24:18,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/d2f2bff081c148aeb420a8ccda46a581 is 50, key is test_row_0/B:col10/1732303457784/Put/seqid=0 2024-11-22T19:24:18,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742443_1619 (size=12301) 2024-11-22T19:24:18,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:18,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303518442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:18,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:18,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303518443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:18,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:18,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303518447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:18,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-22T19:24:18,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:18,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303518548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:18,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:18,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303518552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:18,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:18,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303518552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:18,597 DEBUG [Thread-2386 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11193a0c to 127.0.0.1:57120 2024-11-22T19:24:18,597 DEBUG [Thread-2386 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:24:18,598 DEBUG [Thread-2390 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x154f0f85 to 127.0.0.1:57120 2024-11-22T19:24:18,598 DEBUG [Thread-2390 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:24:18,599 DEBUG [Thread-2392 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x008a917b to 127.0.0.1:57120 2024-11-22T19:24:18,599 DEBUG [Thread-2392 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:24:18,601 DEBUG [Thread-2394 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x054c2725 to 127.0.0.1:57120 2024-11-22T19:24:18,601 DEBUG [Thread-2394 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:24:18,603 DEBUG [Thread-2388 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7861b162 to 127.0.0.1:57120 2024-11-22T19:24:18,603 DEBUG [Thread-2388 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:24:18,756 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:18,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303518756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:18,758 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:18,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303518758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:18,758 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:18,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303518758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:18,824 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/d2f2bff081c148aeb420a8ccda46a581 2024-11-22T19:24:18,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/a5a86d957d2b4a8e904c11aa6607b035 is 50, key is test_row_0/C:col10/1732303457784/Put/seqid=0 2024-11-22T19:24:18,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742444_1620 (size=12301) 2024-11-22T19:24:18,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-22T19:24:19,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:19,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41022 deadline: 1732303519057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:19,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:19,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41042 deadline: 1732303519060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:19,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:19,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41018 deadline: 1732303519060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:19,233 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/a5a86d957d2b4a8e904c11aa6607b035 2024-11-22T19:24:19,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/ba6f97d2e0e042b2a5bd739bd62cbbb0 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/ba6f97d2e0e042b2a5bd739bd62cbbb0 2024-11-22T19:24:19,239 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/ba6f97d2e0e042b2a5bd739bd62cbbb0, entries=150, sequenceid=357, filesize=12.0 K 2024-11-22T19:24:19,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/d2f2bff081c148aeb420a8ccda46a581 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/d2f2bff081c148aeb420a8ccda46a581 2024-11-22T19:24:19,242 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/d2f2bff081c148aeb420a8ccda46a581, entries=150, sequenceid=357, filesize=12.0 K 2024-11-22T19:24:19,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/a5a86d957d2b4a8e904c11aa6607b035 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/a5a86d957d2b4a8e904c11aa6607b035 2024-11-22T19:24:19,246 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/a5a86d957d2b4a8e904c11aa6607b035, entries=150, sequenceid=357, filesize=12.0 K 2024-11-22T19:24:19,246 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 6827bb75511a85c0992cc4b6522bf5f1 in 854ms, sequenceid=357, compaction requested=true 2024-11-22T19:24:19,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2538): Flush status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:19,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
2024-11-22T19:24:19,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-11-22T19:24:19,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=149 2024-11-22T19:24:19,248 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-11-22T19:24:19,248 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0070 sec 2024-11-22T19:24:19,249 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees in 1.0120 sec 2024-11-22T19:24:19,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-22T19:24:19,342 INFO [Thread-2385 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-11-22T19:24:19,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:24:19,560 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6827bb75511a85c0992cc4b6522bf5f1 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-22T19:24:19,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=A 2024-11-22T19:24:19,560 DEBUG [Thread-2377 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5ef40578 to 127.0.0.1:57120 2024-11-22T19:24:19,560 DEBUG [Thread-2377 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:24:19,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:19,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=B 2024-11-22T19:24:19,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:19,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=C 2024-11-22T19:24:19,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:19,562 DEBUG [Thread-2381 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06bc0f7c to 127.0.0.1:57120 2024-11-22T19:24:19,562 DEBUG [Thread-2381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:24:19,563 DEBUG [Thread-2383 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b8b6e04 to 127.0.0.1:57120 2024-11-22T19:24:19,563 DEBUG [Thread-2383 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:24:19,564 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/e097bc72cbcd47bc9af8f6ad2939df0d is 50, key is test_row_0/A:col10/1732303458430/Put/seqid=0 
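The flushed cells above are keyed like test_row_0/A:col10, i.e. the writer threads put the same value into families A, B and C of one row per iteration. A simplified sketch of such a writer follows; the real AcidGuaranteesTestTool randomizes values and targets several rows, so the column count and value format here are assumptions.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AtomicityWriterSketch {
  private static final byte[][] FAMILIES = {
      Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C") };

  // One writer iteration: put the same value into every family of a single row
  // in one Put. A single-row mutation is applied atomically on the region server,
  // so concurrent readers must see the row entirely old or entirely new.
  static void writeOnce(Connection conn, long iteration) throws Exception {
    byte[] value = Bytes.toBytes("val-" + iteration);
    Put put = new Put(Bytes.toBytes("test_row_0"));
    for (byte[] family : FAMILIES) {
      for (int col = 0; col <= 10; col++) {
        put.addColumn(family, Bytes.toBytes("col" + col), value);
      }
    }
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      table.put(put);
    }
  }
}

Reader threads in the tool then get or scan the same row and flag a failure if the families ever disagree, which is the atomicity invariant this log is exercising under flush and compaction pressure.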
2024-11-22T19:24:19,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742445_1621 (size=12301) 2024-11-22T19:24:19,967 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/e097bc72cbcd47bc9af8f6ad2939df0d 2024-11-22T19:24:19,973 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/7076f9842bd2430d8ba71bb7f062aa70 is 50, key is test_row_0/B:col10/1732303458430/Put/seqid=0 2024-11-22T19:24:19,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742446_1622 (size=12301) 2024-11-22T19:24:20,376 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/7076f9842bd2430d8ba71bb7f062aa70 2024-11-22T19:24:20,382 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/0e1a77aed91b44318bb6de79ab1df105 is 50, key is test_row_0/C:col10/1732303458430/Put/seqid=0 2024-11-22T19:24:20,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742447_1623 (size=12301) 2024-11-22T19:24:20,785 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/0e1a77aed91b44318bb6de79ab1df105 2024-11-22T19:24:20,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/e097bc72cbcd47bc9af8f6ad2939df0d as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/e097bc72cbcd47bc9af8f6ad2939df0d 2024-11-22T19:24:20,791 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/e097bc72cbcd47bc9af8f6ad2939df0d, entries=150, sequenceid=380, filesize=12.0 K 2024-11-22T19:24:20,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/7076f9842bd2430d8ba71bb7f062aa70 as 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/7076f9842bd2430d8ba71bb7f062aa70 2024-11-22T19:24:20,794 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/7076f9842bd2430d8ba71bb7f062aa70, entries=150, sequenceid=380, filesize=12.0 K 2024-11-22T19:24:20,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/0e1a77aed91b44318bb6de79ab1df105 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/0e1a77aed91b44318bb6de79ab1df105 2024-11-22T19:24:20,797 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/0e1a77aed91b44318bb6de79ab1df105, entries=150, sequenceid=380, filesize=12.0 K 2024-11-22T19:24:20,798 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=13.42 KB/13740 for 6827bb75511a85c0992cc4b6522bf5f1 in 1237ms, sequenceid=380, compaction requested=true 2024-11-22T19:24:20,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:20,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:24:20,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:20,798 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:24:20,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:24:20,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:20,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6827bb75511a85c0992cc4b6522bf5f1:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:24:20,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:20,798 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:24:20,799 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 4 files of size 52428 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:24:20,799 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49988 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:24:20,799 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 6827bb75511a85c0992cc4b6522bf5f1/A is initiating minor compaction (all files) 2024-11-22T19:24:20,799 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 6827bb75511a85c0992cc4b6522bf5f1/B is initiating minor compaction (all files) 2024-11-22T19:24:20,799 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/B in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:20,799 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/A in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:20,799 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/c5ccccb8879245dba36122d5be7622a2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/961152be29ba48139e63531299a5dd1a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/d2f2bff081c148aeb420a8ccda46a581, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/7076f9842bd2430d8ba71bb7f062aa70] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=48.8 K 2024-11-22T19:24:20,799 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/a7aa3252f63b4f23ba8ea49c18fcc586, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/8363e1f0644e45ae8dace44536e5860d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/ba6f97d2e0e042b2a5bd739bd62cbbb0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/e097bc72cbcd47bc9af8f6ad2939df0d] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=51.2 K 2024-11-22T19:24:20,799 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting a7aa3252f63b4f23ba8ea49c18fcc586, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=318, 
earliestPutTs=1732303455441 2024-11-22T19:24:20,799 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting c5ccccb8879245dba36122d5be7622a2, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732303455441 2024-11-22T19:24:20,799 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 961152be29ba48139e63531299a5dd1a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1732303456639 2024-11-22T19:24:20,799 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8363e1f0644e45ae8dace44536e5860d, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1732303456633 2024-11-22T19:24:20,800 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting d2f2bff081c148aeb420a8ccda46a581, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1732303457771 2024-11-22T19:24:20,800 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba6f97d2e0e042b2a5bd739bd62cbbb0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1732303457771 2024-11-22T19:24:20,800 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 7076f9842bd2430d8ba71bb7f062aa70, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1732303458430 2024-11-22T19:24:20,800 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e097bc72cbcd47bc9af8f6ad2939df0d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1732303458430 2024-11-22T19:24:20,806 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#A#compaction#540 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:20,806 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#B#compaction#539 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:20,807 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/21a793da489f4d759b01c5eff704204e is 50, key is test_row_0/A:col10/1732303458430/Put/seqid=0 2024-11-22T19:24:20,807 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/837ac07670e94b0c9640977ad51358de is 50, key is test_row_0/B:col10/1732303458430/Put/seqid=0 2024-11-22T19:24:20,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742448_1624 (size=13221) 2024-11-22T19:24:20,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742449_1625 (size=13221) 2024-11-22T19:24:21,214 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/837ac07670e94b0c9640977ad51358de as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/837ac07670e94b0c9640977ad51358de 2024-11-22T19:24:21,214 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/21a793da489f4d759b01c5eff704204e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/21a793da489f4d759b01c5eff704204e 2024-11-22T19:24:21,217 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/A of 6827bb75511a85c0992cc4b6522bf5f1 into 21a793da489f4d759b01c5eff704204e(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:21,217 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/B of 6827bb75511a85c0992cc4b6522bf5f1 into 837ac07670e94b0c9640977ad51358de(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
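The A and B compactions above were queued by the system ("Small Compaction requested: system; Because: MemStoreFlusher.0") once the flush left four store files per family. A compaction can also be requested explicitly from a client through the public Admin API; the following is a minimal sketch under the same assumptions as the flush example (default connection configuration, TestAcidGuarantees table), and is not something the logged test run actually did:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Queue a compaction of a single family, analogous to the per-store
          // selections (A, B, C) made automatically in the log above.
          admin.compact(table, Bytes.toBytes("A"));
          // Or request a major compaction of every family in the table.
          admin.majorCompact(table);
        }
      }
    }

Either call only enqueues the request on the region server; the CompactSplit threads seen above (shortCompactions/longCompactions) carry out the actual rewrite asynchronously.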
2024-11-22T19:24:21,217 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:21,217 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:21,217 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/A, priority=12, startTime=1732303460798; duration=0sec 2024-11-22T19:24:21,217 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/B, priority=12, startTime=1732303460798; duration=0sec 2024-11-22T19:24:21,217 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:21,217 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:B 2024-11-22T19:24:21,217 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:21,217 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:A 2024-11-22T19:24:21,217 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:24:21,218 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49988 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:24:21,218 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 6827bb75511a85c0992cc4b6522bf5f1/C is initiating minor compaction (all files) 2024-11-22T19:24:21,218 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6827bb75511a85c0992cc4b6522bf5f1/C in TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
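Each ExploringCompactionPolicy entry above reports a candidate set chosen "after considering 3 permutations with 3 in ratio". The "in ratio" condition is that no file in the candidate set should be larger than the configured compaction ratio (hbase.hstore.compaction.ratio, which defaults to 1.2) times the combined size of the other selected files. The following is a simplified, hypothetical illustration of that check, not the actual ExploringCompactionPolicy code; the sizes are only roughly those of the four A-store files selected above:

    import java.util.Arrays;
    import java.util.List;

    public class RatioCheckSketch {
      // Hypothetical helper: true if every file is at most `ratio` times the total
      // size of the other files in the selection (the "in ratio" condition).
      static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        if (fileSizes.size() < 2) {
          return true;
        }
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          if (size > (total - size) * ratio) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Illustrative sizes in bytes (about 12.8 K, 14.4 K, 12.0 K, 12.0 K).
        List<Long> candidate = Arrays.asList(13107L, 14745L, 12301L, 12301L);
        System.out.println(filesInRatio(candidate, 1.2)); // similarly sized files pass the check
      }
    }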
2024-11-22T19:24:21,218 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/7c5dbe0e6d7245c5b0fd687888092d07, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/a45f27b566fe4da6889c34146e31395b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/a5a86d957d2b4a8e904c11aa6607b035, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/0e1a77aed91b44318bb6de79ab1df105] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp, totalSize=48.8 K 2024-11-22T19:24:21,218 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c5dbe0e6d7245c5b0fd687888092d07, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732303455441 2024-11-22T19:24:21,219 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting a45f27b566fe4da6889c34146e31395b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1732303456639 2024-11-22T19:24:21,219 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting a5a86d957d2b4a8e904c11aa6607b035, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1732303457771 2024-11-22T19:24:21,219 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e1a77aed91b44318bb6de79ab1df105, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1732303458430 2024-11-22T19:24:21,225 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6827bb75511a85c0992cc4b6522bf5f1#C#compaction#541 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:21,226 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/09b1c2e9499c42fcb811284bb5541db0 is 50, key is test_row_0/C:col10/1732303458430/Put/seqid=0 2024-11-22T19:24:21,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742450_1626 (size=13221) 2024-11-22T19:24:21,638 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/09b1c2e9499c42fcb811284bb5541db0 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/09b1c2e9499c42fcb811284bb5541db0 2024-11-22T19:24:21,641 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6827bb75511a85c0992cc4b6522bf5f1/C of 6827bb75511a85c0992cc4b6522bf5f1 into 09b1c2e9499c42fcb811284bb5541db0(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:21,641 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:21,641 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1., storeName=6827bb75511a85c0992cc4b6522bf5f1/C, priority=12, startTime=1732303460798; duration=0sec 2024-11-22T19:24:21,641 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:21,641 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6827bb75511a85c0992cc4b6522bf5f1:C 2024-11-22T19:24:22,401 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T19:24:26,804 DEBUG [Thread-2379 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x032bb71c to 127.0.0.1:57120 2024-11-22T19:24:26,804 DEBUG [Thread-2379 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:24:27,949 DEBUG [Thread-2375 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x32c12a30 to 127.0.0.1:57120 2024-11-22T19:24:27,949 DEBUG [Thread-2375 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:24:27,949 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-22T19:24:27,949 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 45 2024-11-22T19:24:27,949 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 82 2024-11-22T19:24:27,949 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 10 2024-11-22T19:24:27,949 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 87 2024-11-22T19:24:27,949 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 78 2024-11-22T19:24:27,949 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-22T19:24:27,949 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-22T19:24:27,949 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2929 2024-11-22T19:24:27,949 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8787 rows 2024-11-22T19:24:27,949 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2918 2024-11-22T19:24:27,949 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8754 rows 2024-11-22T19:24:27,949 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2916 2024-11-22T19:24:27,949 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8748 rows 2024-11-22T19:24:27,949 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2923 2024-11-22T19:24:27,949 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8769 rows 2024-11-22T19:24:27,949 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2929 2024-11-22T19:24:27,949 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8787 rows 2024-11-22T19:24:27,949 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-22T19:24:27,949 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x022a6e9f to 127.0.0.1:57120 2024-11-22T19:24:27,949 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:24:27,953 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-22T19:24:27,954 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-22T19:24:27,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-22T19:24:27,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-22T19:24:27,957 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303467956"}]},"ts":"1732303467956"} 2024-11-22T19:24:27,957 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-22T19:24:27,959 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-22T19:24:27,960 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-22T19:24:27,961 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6827bb75511a85c0992cc4b6522bf5f1, UNASSIGN}] 2024-11-22T19:24:27,961 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6827bb75511a85c0992cc4b6522bf5f1, UNASSIGN 2024-11-22T19:24:27,961 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=6827bb75511a85c0992cc4b6522bf5f1, regionState=CLOSING, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:24:27,962 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T19:24:27,962 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; CloseRegionProcedure 6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657}] 2024-11-22T19:24:28,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-22T19:24:28,113 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:28,114 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(124): Close 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:24:28,114 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-22T19:24:28,114 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1681): Closing 6827bb75511a85c0992cc4b6522bf5f1, disabling compactions & flushes 2024-11-22T19:24:28,114 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:28,114 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:28,114 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. after waiting 0 ms 2024-11-22T19:24:28,114 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 
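The sequence above is the master-side fan-out of a client-initiated disable: "Client=jenkins//172.17.0.2 disable TestAcidGuarantees" stores pid=150 (DisableTableProcedure), which spawns pid=151 (CloseTableRegionsProcedure), pid=152 (TransitRegionStateProcedure, UNASSIGN) and pid=153 (CloseRegionProcedure), while the client keeps polling "Checking to see if procedure is done pid=150". On the client that entire exchange is one blocking Admin call; a minimal sketch, assuming the table is currently enabled and the default connection configuration applies:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Blocks until the master's DisableTableProcedure completes; the repeated
          // "Checking to see if procedure is done pid=150" entries are this wait.
          admin.disableTable(table);
          System.out.println("disabled: " + admin.isTableDisabled(table));
        }
      }
    }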
2024-11-22T19:24:28,114 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(2837): Flushing 6827bb75511a85c0992cc4b6522bf5f1 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-22T19:24:28,114 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=A 2024-11-22T19:24:28,114 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:28,114 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=B 2024-11-22T19:24:28,114 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:28,114 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6827bb75511a85c0992cc4b6522bf5f1, store=C 2024-11-22T19:24:28,114 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:28,118 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/b500b86cc1834eabb6c34733170c80ec is 50, key is test_row_0/A:col10/1732303466803/Put/seqid=0 2024-11-22T19:24:28,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742451_1627 (size=12301) 2024-11-22T19:24:28,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-22T19:24:28,522 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/b500b86cc1834eabb6c34733170c80ec 2024-11-22T19:24:28,527 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/7b922a6e446b428a815b1fb9081d4393 is 50, key is test_row_0/B:col10/1732303466803/Put/seqid=0 2024-11-22T19:24:28,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742452_1628 (size=12301) 2024-11-22T19:24:28,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-22T19:24:28,931 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 
{event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/7b922a6e446b428a815b1fb9081d4393 2024-11-22T19:24:28,936 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/5f895d41b009423d8d6abfde9c259add is 50, key is test_row_0/C:col10/1732303466803/Put/seqid=0 2024-11-22T19:24:28,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742453_1629 (size=12301) 2024-11-22T19:24:29,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-22T19:24:29,340 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/5f895d41b009423d8d6abfde9c259add 2024-11-22T19:24:29,343 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/A/b500b86cc1834eabb6c34733170c80ec as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/b500b86cc1834eabb6c34733170c80ec 2024-11-22T19:24:29,345 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/b500b86cc1834eabb6c34733170c80ec, entries=150, sequenceid=390, filesize=12.0 K 2024-11-22T19:24:29,346 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/B/7b922a6e446b428a815b1fb9081d4393 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/7b922a6e446b428a815b1fb9081d4393 2024-11-22T19:24:29,348 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/7b922a6e446b428a815b1fb9081d4393, entries=150, sequenceid=390, filesize=12.0 K 2024-11-22T19:24:29,348 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/.tmp/C/5f895d41b009423d8d6abfde9c259add as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/5f895d41b009423d8d6abfde9c259add 2024-11-22T19:24:29,351 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/5f895d41b009423d8d6abfde9c259add, entries=150, sequenceid=390, filesize=12.0 K 2024-11-22T19:24:29,351 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 6827bb75511a85c0992cc4b6522bf5f1 in 1237ms, sequenceid=390, compaction requested=false 2024-11-22T19:24:29,352 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/9e987d4c7c8d461c943e79b49e40d5bb, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/9beb5a5f64634c0cad50f3a81b1747ef, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/0a2178e712fa48efb5d902b9b6859788, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/d08372f09feb46afb269ef8c60135180, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/ce81e3b114de497a830103edaeea9cdd, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/00333b8e803940e496a50d5442dce0ad, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/32bbdd23ca0c4404abb3eda20ebd7cf2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/3a4fa3f7f84b4295a452428cb61419f7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/e3f9cdc067614acf90d95cb4f9f2a279, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/b36fe71b6cd44ac99c3f8d11bd695e29, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/d09dd1cfe09b4385baefdd69cf5a86b7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/37686381074b43109ae4eb1a8c1b71a8, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/e56f50ee242b4d82aeba9fe0031b9713, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/fe322ee00e4f45ec87895e2d85b45de0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/1319214680cf45beae1d0a38f6ab07fe, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/768a60926245441798f8f1b785414b92, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/01bca872158f45e2973075e8800ff715, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/af649385fa3245ba9db7eff539dd5ee6, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/2a60825253454373830990d9ca130988, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/1b8adb5daa794beeb58178b464bdc222, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/2397b3086d40406f88833b813483469c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/b98dcca1b2034bb78f8d882cb38afb8c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/d97089ad798c40c29d135c6f9d75b9b7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/a7aa3252f63b4f23ba8ea49c18fcc586, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/8363e1f0644e45ae8dace44536e5860d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/ba6f97d2e0e042b2a5bd739bd62cbbb0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/e097bc72cbcd47bc9af8f6ad2939df0d] to archive 2024-11-22T19:24:29,353 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
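As the region closes, HFileArchiver does not delete the compacted-away HFiles; the "Archived from FileableStoreFile" entries below show each file being moved from the data directory into a parallel layout under archive/. A hypothetical helper illustrating that source-to-destination path mapping as it appears in the log (the real archiver additionally handles name collisions, retries and cleanup policy):

    import org.apache.hadoop.fs.Path;

    public class ArchivePathSketch {
      // Hypothetical helper: rewrite <root>/data/<ns>/<table>/<region>/<family>/<hfile>
      // into <root>/archive/data/<ns>/<table>/<region>/<family>/<hfile>, matching the
      // source/destination pairs in the HFileArchiver entries below.
      static Path toArchivePath(Path rootDir, Path storeFile) {
        String relative = storeFile.toUri().getPath()
            .substring(rootDir.toUri().getPath().length() + 1); // e.g. data/default/TestAcidGuarantees/...
        return new Path(new Path(rootDir, "archive"), relative);
      }

      public static void main(String[] args) {
        Path root = new Path("/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982");
        Path hfile = new Path(root,
            "data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/9e987d4c7c8d461c943e79b49e40d5bb");
        // Prints the archive location used for the first A-store file archived below.
        System.out.println(toArchivePath(root, hfile));
      }
    }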
2024-11-22T19:24:29,354 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/9e987d4c7c8d461c943e79b49e40d5bb to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/9e987d4c7c8d461c943e79b49e40d5bb 2024-11-22T19:24:29,354 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/9beb5a5f64634c0cad50f3a81b1747ef to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/9beb5a5f64634c0cad50f3a81b1747ef 2024-11-22T19:24:29,355 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/0a2178e712fa48efb5d902b9b6859788 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/0a2178e712fa48efb5d902b9b6859788 2024-11-22T19:24:29,356 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/d08372f09feb46afb269ef8c60135180 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/d08372f09feb46afb269ef8c60135180 2024-11-22T19:24:29,357 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/ce81e3b114de497a830103edaeea9cdd to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/ce81e3b114de497a830103edaeea9cdd 2024-11-22T19:24:29,358 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/00333b8e803940e496a50d5442dce0ad to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/00333b8e803940e496a50d5442dce0ad 2024-11-22T19:24:29,359 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/32bbdd23ca0c4404abb3eda20ebd7cf2 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/32bbdd23ca0c4404abb3eda20ebd7cf2 2024-11-22T19:24:29,360 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/3a4fa3f7f84b4295a452428cb61419f7 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/3a4fa3f7f84b4295a452428cb61419f7 2024-11-22T19:24:29,361 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/e3f9cdc067614acf90d95cb4f9f2a279 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/e3f9cdc067614acf90d95cb4f9f2a279 2024-11-22T19:24:29,362 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/b36fe71b6cd44ac99c3f8d11bd695e29 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/b36fe71b6cd44ac99c3f8d11bd695e29 2024-11-22T19:24:29,363 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/d09dd1cfe09b4385baefdd69cf5a86b7 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/d09dd1cfe09b4385baefdd69cf5a86b7 2024-11-22T19:24:29,363 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/37686381074b43109ae4eb1a8c1b71a8 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/37686381074b43109ae4eb1a8c1b71a8 2024-11-22T19:24:29,364 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/e56f50ee242b4d82aeba9fe0031b9713 to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/e56f50ee242b4d82aeba9fe0031b9713 2024-11-22T19:24:29,365 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/fe322ee00e4f45ec87895e2d85b45de0 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/fe322ee00e4f45ec87895e2d85b45de0 2024-11-22T19:24:29,366 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/1319214680cf45beae1d0a38f6ab07fe to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/1319214680cf45beae1d0a38f6ab07fe 2024-11-22T19:24:29,367 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/768a60926245441798f8f1b785414b92 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/768a60926245441798f8f1b785414b92 2024-11-22T19:24:29,367 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/01bca872158f45e2973075e8800ff715 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/01bca872158f45e2973075e8800ff715 2024-11-22T19:24:29,368 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/af649385fa3245ba9db7eff539dd5ee6 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/af649385fa3245ba9db7eff539dd5ee6 2024-11-22T19:24:29,369 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/2a60825253454373830990d9ca130988 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/2a60825253454373830990d9ca130988 2024-11-22T19:24:29,370 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/1b8adb5daa794beeb58178b464bdc222 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/1b8adb5daa794beeb58178b464bdc222 2024-11-22T19:24:29,370 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/2397b3086d40406f88833b813483469c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/2397b3086d40406f88833b813483469c 2024-11-22T19:24:29,371 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/b98dcca1b2034bb78f8d882cb38afb8c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/b98dcca1b2034bb78f8d882cb38afb8c 2024-11-22T19:24:29,372 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/d97089ad798c40c29d135c6f9d75b9b7 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/d97089ad798c40c29d135c6f9d75b9b7 2024-11-22T19:24:29,373 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/a7aa3252f63b4f23ba8ea49c18fcc586 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/a7aa3252f63b4f23ba8ea49c18fcc586 2024-11-22T19:24:29,374 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/8363e1f0644e45ae8dace44536e5860d to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/8363e1f0644e45ae8dace44536e5860d 2024-11-22T19:24:29,374 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/ba6f97d2e0e042b2a5bd739bd62cbbb0 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/ba6f97d2e0e042b2a5bd739bd62cbbb0 2024-11-22T19:24:29,375 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/e097bc72cbcd47bc9af8f6ad2939df0d to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/e097bc72cbcd47bc9af8f6ad2939df0d 2024-11-22T19:24:29,376 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/31f22e06dff8484496ea532b91db8ce7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/e644b2aeb299477793f0d38b811b9ff2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/2c4652075cce4406880df8adf008a78d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/b8ef3202a6314068b55c6a6f0b7302aa, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/1c53978140ce4681896de4ae709922ce, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/e8679a54ad6946c19b9f7af0226443b5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/0ef55204491d45f394ac9cf0f7c7d05e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/e675b299ccdd4077b0d090c0f254919d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/c6ea1ab157374cd6a2a8c89d1b234f7b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/4912e36db8774a449758a0e43813c869, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/c41dde311eb1433aa7ecd9a408ab96d1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/c8ae318e4da7447ca879f2d0635f4476, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/6f8221221d5b4edea648e4c69837ed85, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/174e88a402be41feaff4195b9413cab5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/ed4bbe3e868c433590e529a4343a50b5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/3fd28fc23fb04194bbf87caf65dfcb46, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/a2efefaaeb854e29aa60a2999fb91c43, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/d36dfc16b900422d97324f65bb8ec32d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/fa0932f539574e9daf00c00cedba0122, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/a2430175e7da47edb2772e99800d0622, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/2e11507849f845ff9a93e6ce666dcbe0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/df0f6c9978a34e2e844e61d63fa04831, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/c5ccccb8879245dba36122d5be7622a2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/ae3c101e54544909a81f8a3c30998bd3, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/961152be29ba48139e63531299a5dd1a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/d2f2bff081c148aeb420a8ccda46a581, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/7076f9842bd2430d8ba71bb7f062aa70] to archive 2024-11-22T19:24:29,377 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-22T19:24:29,378 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/31f22e06dff8484496ea532b91db8ce7 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/31f22e06dff8484496ea532b91db8ce7 2024-11-22T19:24:29,378 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/e644b2aeb299477793f0d38b811b9ff2 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/e644b2aeb299477793f0d38b811b9ff2 2024-11-22T19:24:29,379 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/2c4652075cce4406880df8adf008a78d to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/2c4652075cce4406880df8adf008a78d 2024-11-22T19:24:29,380 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/b8ef3202a6314068b55c6a6f0b7302aa to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/b8ef3202a6314068b55c6a6f0b7302aa 2024-11-22T19:24:29,381 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/1c53978140ce4681896de4ae709922ce to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/1c53978140ce4681896de4ae709922ce 2024-11-22T19:24:29,382 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/e8679a54ad6946c19b9f7af0226443b5 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/e8679a54ad6946c19b9f7af0226443b5 2024-11-22T19:24:29,382 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/0ef55204491d45f394ac9cf0f7c7d05e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/0ef55204491d45f394ac9cf0f7c7d05e 2024-11-22T19:24:29,383 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/e675b299ccdd4077b0d090c0f254919d to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/e675b299ccdd4077b0d090c0f254919d 2024-11-22T19:24:29,384 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/c6ea1ab157374cd6a2a8c89d1b234f7b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/c6ea1ab157374cd6a2a8c89d1b234f7b 2024-11-22T19:24:29,385 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/4912e36db8774a449758a0e43813c869 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/4912e36db8774a449758a0e43813c869 2024-11-22T19:24:29,385 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/c41dde311eb1433aa7ecd9a408ab96d1 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/c41dde311eb1433aa7ecd9a408ab96d1 2024-11-22T19:24:29,386 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/c8ae318e4da7447ca879f2d0635f4476 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/c8ae318e4da7447ca879f2d0635f4476 2024-11-22T19:24:29,387 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/6f8221221d5b4edea648e4c69837ed85 to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/6f8221221d5b4edea648e4c69837ed85 2024-11-22T19:24:29,388 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/174e88a402be41feaff4195b9413cab5 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/174e88a402be41feaff4195b9413cab5 2024-11-22T19:24:29,388 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/ed4bbe3e868c433590e529a4343a50b5 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/ed4bbe3e868c433590e529a4343a50b5 2024-11-22T19:24:29,389 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/3fd28fc23fb04194bbf87caf65dfcb46 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/3fd28fc23fb04194bbf87caf65dfcb46 2024-11-22T19:24:29,390 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/a2efefaaeb854e29aa60a2999fb91c43 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/a2efefaaeb854e29aa60a2999fb91c43 2024-11-22T19:24:29,391 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/d36dfc16b900422d97324f65bb8ec32d to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/d36dfc16b900422d97324f65bb8ec32d 2024-11-22T19:24:29,391 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/fa0932f539574e9daf00c00cedba0122 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/fa0932f539574e9daf00c00cedba0122 2024-11-22T19:24:29,392 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/a2430175e7da47edb2772e99800d0622 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/a2430175e7da47edb2772e99800d0622 2024-11-22T19:24:29,393 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/2e11507849f845ff9a93e6ce666dcbe0 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/2e11507849f845ff9a93e6ce666dcbe0 2024-11-22T19:24:29,394 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/df0f6c9978a34e2e844e61d63fa04831 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/df0f6c9978a34e2e844e61d63fa04831 2024-11-22T19:24:29,394 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/c5ccccb8879245dba36122d5be7622a2 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/c5ccccb8879245dba36122d5be7622a2 2024-11-22T19:24:29,395 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/ae3c101e54544909a81f8a3c30998bd3 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/ae3c101e54544909a81f8a3c30998bd3 2024-11-22T19:24:29,396 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/961152be29ba48139e63531299a5dd1a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/961152be29ba48139e63531299a5dd1a 2024-11-22T19:24:29,397 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/d2f2bff081c148aeb420a8ccda46a581 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/d2f2bff081c148aeb420a8ccda46a581 2024-11-22T19:24:29,398 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/7076f9842bd2430d8ba71bb7f062aa70 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/7076f9842bd2430d8ba71bb7f062aa70 2024-11-22T19:24:29,399 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/e6255914b01b4b33917e0b1387d0e291, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/9e0d4a1040574c9cb76b8d6cfd159aca, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/13fc68e5e5124ec6bc31147734c31761, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/e5b491d5b02e4f508349b0dd1f5eba27, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/bd8a3c3cb75142e69d0ad2e297a36606, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/dc5e95e41a764baba391464374fdd240, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/82bd192230b745efb5cad47ac5cf57eb, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/2b3c7e3f76e84804a0be0fac44b6f9c6, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/48ab642b4f7243b4be78d3c4cfb9a053, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/08c40c43e490482cb492ff336d0b8888, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/4980a83ca0174b658c0da554b4a4d469, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/3dc40ab4b6d24720abdfd98c08c54025, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/52bc6f8b327240dfb167f52c2accb316, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/ee933004c3cc42ec8ea22833bae22e26, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/e3c192bd62fe460ba0f7c5384adfeff5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/4ff5fc1f70134144abb13d372346f58a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/32002256e662409f855b1d9095abe482, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/9ba866c092bb4a49bd215245827f9467, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/d9b6f0feeea74ab384c1219149caed8e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/5621cfb597c34aeea70daa098b85e984, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/102613682949456887c9c486fa494fb5, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/13afa3f9c3eb474f9b06916b7ef9ca35, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/7c5dbe0e6d7245c5b0fd687888092d07, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/0395400e56a749e182a4193189a5ef6d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/a45f27b566fe4da6889c34146e31395b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/a5a86d957d2b4a8e904c11aa6607b035, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/0e1a77aed91b44318bb6de79ab1df105] to archive 2024-11-22T19:24:29,399 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-22T19:24:29,400 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/e6255914b01b4b33917e0b1387d0e291 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/e6255914b01b4b33917e0b1387d0e291 2024-11-22T19:24:29,401 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/9e0d4a1040574c9cb76b8d6cfd159aca to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/9e0d4a1040574c9cb76b8d6cfd159aca 2024-11-22T19:24:29,402 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/13fc68e5e5124ec6bc31147734c31761 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/13fc68e5e5124ec6bc31147734c31761 2024-11-22T19:24:29,403 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/e5b491d5b02e4f508349b0dd1f5eba27 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/e5b491d5b02e4f508349b0dd1f5eba27 2024-11-22T19:24:29,403 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/bd8a3c3cb75142e69d0ad2e297a36606 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/bd8a3c3cb75142e69d0ad2e297a36606 2024-11-22T19:24:29,404 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/dc5e95e41a764baba391464374fdd240 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/dc5e95e41a764baba391464374fdd240 2024-11-22T19:24:29,405 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/82bd192230b745efb5cad47ac5cf57eb to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/82bd192230b745efb5cad47ac5cf57eb 2024-11-22T19:24:29,406 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/2b3c7e3f76e84804a0be0fac44b6f9c6 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/2b3c7e3f76e84804a0be0fac44b6f9c6 2024-11-22T19:24:29,406 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/48ab642b4f7243b4be78d3c4cfb9a053 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/48ab642b4f7243b4be78d3c4cfb9a053 2024-11-22T19:24:29,407 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/08c40c43e490482cb492ff336d0b8888 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/08c40c43e490482cb492ff336d0b8888 2024-11-22T19:24:29,408 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/4980a83ca0174b658c0da554b4a4d469 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/4980a83ca0174b658c0da554b4a4d469 2024-11-22T19:24:29,409 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/3dc40ab4b6d24720abdfd98c08c54025 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/3dc40ab4b6d24720abdfd98c08c54025 2024-11-22T19:24:29,409 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/52bc6f8b327240dfb167f52c2accb316 to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/52bc6f8b327240dfb167f52c2accb316 2024-11-22T19:24:29,410 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/ee933004c3cc42ec8ea22833bae22e26 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/ee933004c3cc42ec8ea22833bae22e26 2024-11-22T19:24:29,411 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/e3c192bd62fe460ba0f7c5384adfeff5 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/e3c192bd62fe460ba0f7c5384adfeff5 2024-11-22T19:24:29,412 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/4ff5fc1f70134144abb13d372346f58a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/4ff5fc1f70134144abb13d372346f58a 2024-11-22T19:24:29,412 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/32002256e662409f855b1d9095abe482 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/32002256e662409f855b1d9095abe482 2024-11-22T19:24:29,413 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/9ba866c092bb4a49bd215245827f9467 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/9ba866c092bb4a49bd215245827f9467 2024-11-22T19:24:29,414 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/d9b6f0feeea74ab384c1219149caed8e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/d9b6f0feeea74ab384c1219149caed8e 2024-11-22T19:24:29,414 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/5621cfb597c34aeea70daa098b85e984 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/5621cfb597c34aeea70daa098b85e984 2024-11-22T19:24:29,415 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/102613682949456887c9c486fa494fb5 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/102613682949456887c9c486fa494fb5 2024-11-22T19:24:29,416 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/13afa3f9c3eb474f9b06916b7ef9ca35 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/13afa3f9c3eb474f9b06916b7ef9ca35 2024-11-22T19:24:29,416 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/7c5dbe0e6d7245c5b0fd687888092d07 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/7c5dbe0e6d7245c5b0fd687888092d07 2024-11-22T19:24:29,417 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/0395400e56a749e182a4193189a5ef6d to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/0395400e56a749e182a4193189a5ef6d 2024-11-22T19:24:29,418 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/a45f27b566fe4da6889c34146e31395b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/a45f27b566fe4da6889c34146e31395b 2024-11-22T19:24:29,418 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/a5a86d957d2b4a8e904c11aa6607b035 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/a5a86d957d2b4a8e904c11aa6607b035 2024-11-22T19:24:29,419 DEBUG [StoreCloser-TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/0e1a77aed91b44318bb6de79ab1df105 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/0e1a77aed91b44318bb6de79ab1df105 2024-11-22T19:24:29,422 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/recovered.edits/393.seqid, newMaxSeqId=393, maxSeqId=1 2024-11-22T19:24:29,423 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1. 2024-11-22T19:24:29,423 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1635): Region close journal for 6827bb75511a85c0992cc4b6522bf5f1: 2024-11-22T19:24:29,424 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(170): Closed 6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:24:29,424 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=6827bb75511a85c0992cc4b6522bf5f1, regionState=CLOSED 2024-11-22T19:24:29,426 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-11-22T19:24:29,426 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; CloseRegionProcedure 6827bb75511a85c0992cc4b6522bf5f1, server=a307a1377457,35917,1732303314657 in 1.4630 sec 2024-11-22T19:24:29,427 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=151 2024-11-22T19:24:29,427 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=151, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6827bb75511a85c0992cc4b6522bf5f1, UNASSIGN in 1.4660 sec 2024-11-22T19:24:29,428 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-11-22T19:24:29,428 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4670 sec 2024-11-22T19:24:29,429 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303469429"}]},"ts":"1732303469429"} 2024-11-22T19:24:29,429 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 
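The StoreCloser entries above repeat one pattern per column family: regionserver.HStore(2316) lists the compacted HFiles of A, B and C, and backup.HFileArchiver then moves each file from the region's data/ directory to the same relative location under archive/ instead of deleting it. The sketch below illustrates that move pattern with the plain Hadoop FileSystem API; the class name, the archiveStoreFiles helper and the path handling are assumptions for illustration and are not HBase's actual HFileArchiver code.

```java
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Illustrative sketch only; not HBase's backup.HFileArchiver implementation. */
public final class ArchiveSketch {

  /**
   * Moves each compacted store file from the region's data/ tree to the same
   * relative location under archive/, creating parent directories as needed.
   */
  static void archiveStoreFiles(FileSystem fs, Path dataRoot, Path archiveRoot,
      List<Path> compactedFiles) throws IOException {
    for (Path src : compactedFiles) {
      // Re-root table/region/family/file under the archive root.
      String relative = src.toString().substring(dataRoot.toString().length() + 1);
      Path dst = new Path(archiveRoot, relative);
      fs.mkdirs(dst.getParent());
      if (!fs.rename(src, dst)) {
        throw new IOException("Failed to archive " + src + " to " + dst);
      }
    }
  }

  private ArchiveSketch() {
  }
}
```

In the log, dataRoot corresponds to .../data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1 and archiveRoot to the matching path under .../archive/. Archiving rather than deleting keeps snapshots and backup tooling that still reference the old HFiles working; a cleaner chore removes unreferenced files from archive/ later.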
2024-11-22T19:24:29,431 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-22T19:24:29,432 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4780 sec 2024-11-22T19:24:30,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-22T19:24:30,060 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 150 completed 2024-11-22T19:24:30,060 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-22T19:24:30,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=154, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:24:30,061 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=154, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:24:30,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-22T19:24:30,062 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=154, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:24:30,063 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:24:30,064 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A, FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B, FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C, FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/recovered.edits] 2024-11-22T19:24:30,066 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/21a793da489f4d759b01c5eff704204e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/21a793da489f4d759b01c5eff704204e 2024-11-22T19:24:30,067 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/b500b86cc1834eabb6c34733170c80ec to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/A/b500b86cc1834eabb6c34733170c80ec 2024-11-22T19:24:30,069 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/7b922a6e446b428a815b1fb9081d4393 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/7b922a6e446b428a815b1fb9081d4393 2024-11-22T19:24:30,070 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/837ac07670e94b0c9640977ad51358de to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/B/837ac07670e94b0c9640977ad51358de 2024-11-22T19:24:30,072 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/09b1c2e9499c42fcb811284bb5541db0 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/09b1c2e9499c42fcb811284bb5541db0 2024-11-22T19:24:30,072 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/5f895d41b009423d8d6abfde9c259add to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/C/5f895d41b009423d8d6abfde9c259add 2024-11-22T19:24:30,074 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/recovered.edits/393.seqid to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1/recovered.edits/393.seqid 2024-11-22T19:24:30,075 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/6827bb75511a85c0992cc4b6522bf5f1 2024-11-22T19:24:30,075 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-22T19:24:30,076 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=154, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:24:30,078 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-22T19:24:30,079 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
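The tail of this flow is an ordinary disable-then-drop driven from the test client: "Operation: DISABLE ... procId: 150 completed", then "delete TestAcidGuarantees", and the DeleteTableProcedure archiving the region directory, removing the rows from hbase:meta, and dropping the table descriptor. A minimal client-side sketch of that call sequence is below; the table name comes from the log, while the connection setup is generic boilerplate rather than the test's own harness.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Disable first: DeleteTableProcedure requires the table to be DISABLED.
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);   // drives the DisableTableProcedure seen above
      }
      admin.deleteTable(table);      // drives the DeleteTableProcedure / region archiving
    }
  }
}
```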
2024-11-22T19:24:30,080 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=154, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:24:30,080 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-22T19:24:30,080 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732303470080"}]},"ts":"9223372036854775807"} 2024-11-22T19:24:30,081 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-22T19:24:30,081 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 6827bb75511a85c0992cc4b6522bf5f1, NAME => 'TestAcidGuarantees,,1732303436437.6827bb75511a85c0992cc4b6522bf5f1.', STARTKEY => '', ENDKEY => ''}] 2024-11-22T19:24:30,081 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-22T19:24:30,081 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732303470081"}]},"ts":"9223372036854775807"} 2024-11-22T19:24:30,082 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-22T19:24:30,084 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=154, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:24:30,085 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 24 msec 2024-11-22T19:24:30,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-22T19:24:30,162 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 154 completed 2024-11-22T19:24:30,172 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=237 (was 236) - Thread LEAK? -, OpenFileDescriptor=453 (was 453), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=433 (was 533), ProcessCount=11 (was 11), AvailableMemoryMB=4799 (was 4815) 2024-11-22T19:24:30,180 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=237, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=433, ProcessCount=11, AvailableMemoryMB=4799 2024-11-22T19:24:30,181 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-22T19:24:30,181 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T19:24:30,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-22T19:24:30,182 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T19:24:30,183 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:30,183 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 155 2024-11-22T19:24:30,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-22T19:24:30,183 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T19:24:30,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742454_1630 (size=963) 2024-11-22T19:24:30,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-22T19:24:30,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-22T19:24:30,589 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982 2024-11-22T19:24:30,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742455_1631 (size=53) 2024-11-22T19:24:30,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-22T19:24:30,994 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T19:24:30,994 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 457088d1c2889b36850d00978a970867, disabling compactions & flushes 2024-11-22T19:24:30,994 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:30,994 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:30,994 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. after waiting 0 ms 2024-11-22T19:24:30,994 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:30,994 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
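For the next test (testMobGetAtomicity), the client recreates TestAcidGuarantees with the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' and three families A, B and C with VERSIONS => '1' and 64 KB blocks; the TableDescriptorChecker warning just before the create comes from a 128 KB memstore flush size (131072 bytes), presumably set by the test to force frequent flushes. A rough client-side equivalent using the HBase 2.x descriptor builders is sketched below; it reproduces only attributes visible in the log, and setting the flush size on the table descriptor is an assumption about how the test configures it.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateAdaptiveTableSketch {

  static TableDescriptor adaptiveDescriptor() {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // Table-level metadata from the log: ADAPTIVE in-memory compaction.
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
            // 128 KB flush size; triggers the TableDescriptorChecker warning above.
            .setMemStoreFlushSize(131072L);
    for (String family : new String[] { "A", "B", "C" }) {
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)       // VERSIONS => '1'
          .setBlocksize(65536)     // BLOCKSIZE => '65536 B (64KB)'
          .build();
      builder.setColumnFamily(cf);
    }
    return builder.build();
  }

  static void create(Admin admin) throws java.io.IOException {
    admin.createTable(adaptiveDescriptor()); // drives the CreateTableProcedure in the log
  }

  private CreateAdaptiveTableSketch() {
  }
}
```

With such a descriptor in place, the StoreOpener entries that follow show each family's store instantiated as a CompactingMemStore with compactor=ADAPTIVE and a 2.00 MB in-memory flush threshold, which is the policy this test variant exercises.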
2024-11-22T19:24:30,994 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:30,995 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T19:24:30,996 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732303470995"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732303470995"}]},"ts":"1732303470995"} 2024-11-22T19:24:30,997 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-22T19:24:30,997 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T19:24:30,997 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303470997"}]},"ts":"1732303470997"} 2024-11-22T19:24:30,998 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-22T19:24:31,001 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=457088d1c2889b36850d00978a970867, ASSIGN}] 2024-11-22T19:24:31,002 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=457088d1c2889b36850d00978a970867, ASSIGN 2024-11-22T19:24:31,002 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=457088d1c2889b36850d00978a970867, ASSIGN; state=OFFLINE, location=a307a1377457,35917,1732303314657; forceNewPlan=false, retain=false 2024-11-22T19:24:31,153 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=457088d1c2889b36850d00978a970867, regionState=OPENING, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:24:31,154 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE; OpenRegionProcedure 457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657}] 2024-11-22T19:24:31,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-22T19:24:31,305 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:31,308 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
2024-11-22T19:24:31,308 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7285): Opening region: {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} 2024-11-22T19:24:31,308 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 457088d1c2889b36850d00978a970867 2024-11-22T19:24:31,308 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T19:24:31,308 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7327): checking encryption for 457088d1c2889b36850d00978a970867 2024-11-22T19:24:31,308 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7330): checking classloading for 457088d1c2889b36850d00978a970867 2024-11-22T19:24:31,309 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 457088d1c2889b36850d00978a970867 2024-11-22T19:24:31,310 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:24:31,311 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 457088d1c2889b36850d00978a970867 columnFamilyName A 2024-11-22T19:24:31,311 DEBUG [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:31,311 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] regionserver.HStore(327): Store=457088d1c2889b36850d00978a970867/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:24:31,311 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 457088d1c2889b36850d00978a970867 2024-11-22T19:24:31,312 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:24:31,312 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 457088d1c2889b36850d00978a970867 columnFamilyName B 2024-11-22T19:24:31,312 DEBUG [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:31,313 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] regionserver.HStore(327): Store=457088d1c2889b36850d00978a970867/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:24:31,313 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 457088d1c2889b36850d00978a970867 2024-11-22T19:24:31,313 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:24:31,313 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 457088d1c2889b36850d00978a970867 columnFamilyName C 2024-11-22T19:24:31,313 DEBUG [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:31,314 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] regionserver.HStore(327): Store=457088d1c2889b36850d00978a970867/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:24:31,314 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:31,314 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867 2024-11-22T19:24:31,315 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867 2024-11-22T19:24:31,316 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T19:24:31,317 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1085): writing seq id for 457088d1c2889b36850d00978a970867 2024-11-22T19:24:31,318 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T19:24:31,318 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1102): Opened 457088d1c2889b36850d00978a970867; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58810837, jitterRate=-0.12365023791790009}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T19:24:31,319 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1001): Region open journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:31,320 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., pid=157, masterSystemTime=1732303471305 2024-11-22T19:24:31,321 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:31,321 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
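[Editor's note] The repeated "Checking to see if procedure is done pid=155" calls above are the master answering the client's polling while the region is created, opened, and its A/B/C stores are wired up with CompactingMemStore. A small sketch of that client-side wait, assuming an Admin handle; waitForTable and deadlineMs are hypothetical names, not from the test:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class WaitForTable {
  // Poll until the table's regions are assigned, mirroring the
  // "is procedure done" checks seen in the log.
  static void waitForTable(Admin admin, TableName table, long deadlineMs)
      throws Exception {
    long stop = System.currentTimeMillis() + deadlineMs;
    while (!admin.isTableAvailable(table)) {
      if (System.currentTimeMillis() > stop) {
        throw new IllegalStateException("Timed out waiting for " + table);
      }
      Thread.sleep(100); // back off between checks
    }
  }
}
```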
2024-11-22T19:24:31,321 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=457088d1c2889b36850d00978a970867, regionState=OPEN, openSeqNum=2, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:24:31,323 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-11-22T19:24:31,323 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; OpenRegionProcedure 457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 in 168 msec 2024-11-22T19:24:31,323 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-11-22T19:24:31,323 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=457088d1c2889b36850d00978a970867, ASSIGN in 322 msec 2024-11-22T19:24:31,324 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T19:24:31,324 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303471324"}]},"ts":"1732303471324"} 2024-11-22T19:24:31,325 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-22T19:24:31,327 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T19:24:31,328 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1460 sec 2024-11-22T19:24:32,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-22T19:24:32,287 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 155 completed 2024-11-22T19:24:32,288 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5765d46a to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6d9954b7 2024-11-22T19:24:32,294 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fb684eb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:24:32,295 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:24:32,296 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37942, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:24:32,297 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T19:24:32,297 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43636, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T19:24:32,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-22T19:24:32,299 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T19:24:32,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=158, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-22T19:24:32,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742456_1632 (size=999) 2024-11-22T19:24:32,708 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-22T19:24:32,708 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-22T19:24:32,710 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-22T19:24:32,711 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=457088d1c2889b36850d00978a970867, REOPEN/MOVE}] 2024-11-22T19:24:32,712 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=457088d1c2889b36850d00978a970867, REOPEN/MOVE 2024-11-22T19:24:32,712 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=457088d1c2889b36850d00978a970867, regionState=CLOSING, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:24:32,713 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T19:24:32,713 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=161, ppid=160, state=RUNNABLE; CloseRegionProcedure 457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657}] 2024-11-22T19:24:32,864 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:32,865 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(124): Close 457088d1c2889b36850d00978a970867 2024-11-22T19:24:32,865 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-22T19:24:32,865 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1681): Closing 457088d1c2889b36850d00978a970867, disabling compactions & flushes 2024-11-22T19:24:32,865 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:32,865 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:32,865 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. after waiting 0 ms 2024-11-22T19:24:32,865 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
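[Editor's note] The modify request above rewrites family A as a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4') and kicks off ReopenTableRegionsProcedure, which closes the region before reopening it with the new descriptor. A hedged sketch of producing such a modification with the Admin API; the helper name is illustrative:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnA {
  static void enableMob(Admin admin) throws Exception {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    TableDescriptor current = admin.getDescriptor(name);
    // Rebuild family 'A' as a MOB family with a 4-byte threshold,
    // matching IS_MOB => 'true', MOB_THRESHOLD => '4' in the log.
    ColumnFamilyDescriptor a =
        ColumnFamilyDescriptorBuilder
            .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
            .setMobEnabled(true)
            .setMobThreshold(4L)
            .build();
    TableDescriptor updated =
        TableDescriptorBuilder.newBuilder(current).modifyColumnFamily(a).build();
    // Triggers ModifyTableProcedure and a region reopen, as logged above.
    admin.modifyTable(updated);
  }
}
```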
2024-11-22T19:24:32,868 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-22T19:24:32,869 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:32,869 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1635): Region close journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:32,869 WARN [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegionServer(3786): Not adding moved region record: 457088d1c2889b36850d00978a970867 to self. 2024-11-22T19:24:32,870 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(170): Closed 457088d1c2889b36850d00978a970867 2024-11-22T19:24:32,870 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=457088d1c2889b36850d00978a970867, regionState=CLOSED 2024-11-22T19:24:32,872 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=161, resume processing ppid=160 2024-11-22T19:24:32,872 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, ppid=160, state=SUCCESS; CloseRegionProcedure 457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 in 158 msec 2024-11-22T19:24:32,872 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=457088d1c2889b36850d00978a970867, REOPEN/MOVE; state=CLOSED, location=a307a1377457,35917,1732303314657; forceNewPlan=false, retain=true 2024-11-22T19:24:33,023 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=457088d1c2889b36850d00978a970867, regionState=OPENING, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:24:33,024 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=160, state=RUNNABLE; OpenRegionProcedure 457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657}] 2024-11-22T19:24:33,175 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:33,177 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
2024-11-22T19:24:33,177 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(7285): Opening region: {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} 2024-11-22T19:24:33,178 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 457088d1c2889b36850d00978a970867 2024-11-22T19:24:33,178 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T19:24:33,178 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(7327): checking encryption for 457088d1c2889b36850d00978a970867 2024-11-22T19:24:33,178 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(7330): checking classloading for 457088d1c2889b36850d00978a970867 2024-11-22T19:24:33,179 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 457088d1c2889b36850d00978a970867 2024-11-22T19:24:33,180 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:24:33,180 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 457088d1c2889b36850d00978a970867 columnFamilyName A 2024-11-22T19:24:33,181 DEBUG [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:33,181 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] regionserver.HStore(327): Store=457088d1c2889b36850d00978a970867/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:24:33,182 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 457088d1c2889b36850d00978a970867 2024-11-22T19:24:33,182 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:24:33,182 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 457088d1c2889b36850d00978a970867 columnFamilyName B 2024-11-22T19:24:33,182 DEBUG [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:33,183 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] regionserver.HStore(327): Store=457088d1c2889b36850d00978a970867/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:24:33,183 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 457088d1c2889b36850d00978a970867 2024-11-22T19:24:33,183 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T19:24:33,183 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 457088d1c2889b36850d00978a970867 columnFamilyName C 2024-11-22T19:24:33,183 DEBUG [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:33,184 INFO [StoreOpener-457088d1c2889b36850d00978a970867-1 {}] regionserver.HStore(327): Store=457088d1c2889b36850d00978a970867/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T19:24:33,184 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:33,185 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867 2024-11-22T19:24:33,185 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867 2024-11-22T19:24:33,186 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T19:24:33,187 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1085): writing seq id for 457088d1c2889b36850d00978a970867 2024-11-22T19:24:33,188 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1102): Opened 457088d1c2889b36850d00978a970867; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68056717, jitterRate=0.014124110341072083}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T19:24:33,189 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1001): Region open journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:33,189 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., pid=162, masterSystemTime=1732303473175 2024-11-22T19:24:33,190 DEBUG [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:33,190 INFO [RS_OPEN_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
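[Editor's note] The entries that follow show the test opening a batch of client connections, asking the master to flush TestAcidGuarantees, and writers then hitting RegionTooBusyException once the 512 KB memstore limit is exceeded. A purely illustrative client-side sketch, assuming standard Admin/Table handles and that the exception reaches the caller unwrapped (plausible with client retries effectively disabled, as the maxRetries=0 connections in the log suggest); in normal configurations the stock client retries this internally:

```java
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class FlushAndRetry {
  static void flushTable(Admin admin) throws Exception {
    // Equivalent of the "flush TestAcidGuarantees" request logged by HMaster.
    admin.flush(TableName.valueOf("TestAcidGuarantees"));
  }

  // Simplified manual retry on RegionTooBusyException; illustrative only.
  static void putWithRetry(Table table, Put put, int attempts) throws Exception {
    for (int i = 0; i < attempts; i++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        Thread.sleep(200L * (i + 1)); // back off while the memstore drains
      }
    }
    throw new IllegalStateException("Region still too busy after " + attempts + " attempts");
  }
}
```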
2024-11-22T19:24:33,191 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=457088d1c2889b36850d00978a970867, regionState=OPEN, openSeqNum=5, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:24:33,192 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=160 2024-11-22T19:24:33,192 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=160, state=SUCCESS; OpenRegionProcedure 457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 in 167 msec 2024-11-22T19:24:33,193 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-11-22T19:24:33,193 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=457088d1c2889b36850d00978a970867, REOPEN/MOVE in 481 msec 2024-11-22T19:24:33,195 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-11-22T19:24:33,195 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 484 msec 2024-11-22T19:24:33,196 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 896 msec 2024-11-22T19:24:33,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-11-22T19:24:33,198 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2ac53e79 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d5efb7a 2024-11-22T19:24:33,202 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@644b7e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:24:33,202 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x05bc9c3e to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7fc332d8 2024-11-22T19:24:33,206 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c9b5141, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:24:33,207 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7181df3b to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17327621 2024-11-22T19:24:33,210 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11a52cdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:24:33,211 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11030ef5 
to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1584f18a 2024-11-22T19:24:33,214 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d7fe431, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:24:33,215 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x69abefea to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5b914bf4 2024-11-22T19:24:33,218 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@91d72db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:24:33,218 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3f6a59e4 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5d836f78 2024-11-22T19:24:33,222 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d7fe93b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:24:33,223 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x150e08ed to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@53305d9b 2024-11-22T19:24:33,229 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11c440f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:24:33,230 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3a3b66d3 to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bb6288a 2024-11-22T19:24:33,232 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58460ef3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:24:33,233 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5cfdf76c to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6556601 2024-11-22T19:24:33,236 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e8cd1ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:24:33,237 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x68c2838a to 127.0.0.1:57120 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@458a85fd 2024-11-22T19:24:33,242 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d832d43, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T19:24:33,244 DEBUG [hconnection-0x40911a20-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:24:33,244 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:24:33,245 DEBUG [hconnection-0x30408e85-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:24:33,245 DEBUG [hconnection-0x2e501fc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:24:33,245 DEBUG [hconnection-0x3ba7f99c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:24:33,245 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32858, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:24:33,245 DEBUG [hconnection-0x5e894ce0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:24:33,245 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32872, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:24:33,245 DEBUG [hconnection-0x5ac32eaa-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:24:33,246 DEBUG [hconnection-0x15c72f12-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:24:33,246 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32874, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:24:33,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-11-22T19:24:33,246 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32890, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:24:33,247 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32894, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:24:33,247 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32904, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:24:33,247 DEBUG [hconnection-0x1aad9c3d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): 
Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:24:33,247 DEBUG [hconnection-0x3fd87151-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:24:33,247 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32914, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:24:33,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-22T19:24:33,248 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32920, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:24:33,249 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:24:33,249 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:24:33,250 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:24:33,250 DEBUG [hconnection-0x54b3765c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T19:24:33,250 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32928, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:24:33,252 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32944, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T19:24:33,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 457088d1c2889b36850d00978a970867 2024-11-22T19:24:33,255 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 457088d1c2889b36850d00978a970867 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T19:24:33,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=A 2024-11-22T19:24:33,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:33,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=B 2024-11-22T19:24:33,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:33,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=C 2024-11-22T19:24:33,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:33,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore 
size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:33,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303533272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:33,276 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:33,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303533274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:33,276 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:33,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303533274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:33,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:33,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303533275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:33,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:33,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303533275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:33,284 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122fbbd7943cc1e448aacdc54559761882a_457088d1c2889b36850d00978a970867 is 50, key is test_row_0/A:col10/1732303473255/Put/seqid=0 2024-11-22T19:24:33,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742457_1633 (size=12154) 2024-11-22T19:24:33,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-22T19:24:33,377 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:33,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303533375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:33,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:33,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303533377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:33,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:33,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303533377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:33,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:33,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303533378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:33,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:33,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303533379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:33,402 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:33,402 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-22T19:24:33,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:33,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:33,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:33,402 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:33,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:33,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:33,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-22T19:24:33,554 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:33,555 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-22T19:24:33,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:33,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:33,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:33,555 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:33,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:33,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:33,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:33,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:33,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303533579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:33,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303533579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:33,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:33,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303533580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:33,581 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:33,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303533580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:33,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:33,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303533583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:33,701 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:33,705 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122fbbd7943cc1e448aacdc54559761882a_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122fbbd7943cc1e448aacdc54559761882a_457088d1c2889b36850d00978a970867 2024-11-22T19:24:33,706 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/b8180b41d77b41d0949b2bb131963b3c, store: [table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:33,707 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/b8180b41d77b41d0949b2bb131963b3c is 175, key is test_row_0/A:col10/1732303473255/Put/seqid=0 2024-11-22T19:24:33,707 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:33,708 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-22T19:24:33,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
2024-11-22T19:24:33,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:33,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:33,708 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:33,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:24:33,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:33,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742458_1634 (size=30955) 2024-11-22T19:24:33,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-22T19:24:33,859 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:33,859 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-22T19:24:33,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
2024-11-22T19:24:33,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:33,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:33,860 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:33,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:24:33,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:33,882 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:33,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303533882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:33,882 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:33,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303533882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:33,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:33,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303533882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:33,884 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:33,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303533883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:33,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:33,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303533887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:34,012 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:34,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-22T19:24:34,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:34,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:34,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:34,012 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:34,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:34,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:34,111 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/b8180b41d77b41d0949b2bb131963b3c 2024-11-22T19:24:34,134 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/6f9cf7bfefb3436695b5d52aa5f0c2e0 is 50, key is test_row_0/B:col10/1732303473255/Put/seqid=0 2024-11-22T19:24:34,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742459_1635 (size=12001) 2024-11-22T19:24:34,138 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/6f9cf7bfefb3436695b5d52aa5f0c2e0 2024-11-22T19:24:34,161 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/99449147689d4c738948171c4276c064 is 50, key is test_row_0/C:col10/1732303473255/Put/seqid=0 2024-11-22T19:24:34,164 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:34,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742460_1636 (size=12001) 
2024-11-22T19:24:34,164 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-22T19:24:34,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:34,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:34,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:34,165 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:34,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:24:34,165 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/99449147689d4c738948171c4276c064 2024-11-22T19:24:34,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:24:34,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/b8180b41d77b41d0949b2bb131963b3c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/b8180b41d77b41d0949b2bb131963b3c 2024-11-22T19:24:34,172 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/b8180b41d77b41d0949b2bb131963b3c, entries=150, sequenceid=17, filesize=30.2 K 2024-11-22T19:24:34,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/6f9cf7bfefb3436695b5d52aa5f0c2e0 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/6f9cf7bfefb3436695b5d52aa5f0c2e0 2024-11-22T19:24:34,178 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/6f9cf7bfefb3436695b5d52aa5f0c2e0, entries=150, sequenceid=17, filesize=11.7 K 2024-11-22T19:24:34,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/99449147689d4c738948171c4276c064 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/99449147689d4c738948171c4276c064 2024-11-22T19:24:34,182 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/99449147689d4c738948171c4276c064, entries=150, sequenceid=17, filesize=11.7 K 2024-11-22T19:24:34,182 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 457088d1c2889b36850d00978a970867 in 927ms, sequenceid=17, compaction requested=false 2024-11-22T19:24:34,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:34,316 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:34,317 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-22T19:24:34,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
2024-11-22T19:24:34,317 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 457088d1c2889b36850d00978a970867 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-22T19:24:34,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=A 2024-11-22T19:24:34,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:34,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=B 2024-11-22T19:24:34,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:34,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=C 2024-11-22T19:24:34,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:34,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411224ac68bf1cca541789659134665c3a400_457088d1c2889b36850d00978a970867 is 50, key is test_row_0/A:col10/1732303473274/Put/seqid=0 2024-11-22T19:24:34,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742461_1637 (size=12154) 2024-11-22T19:24:34,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-22T19:24:34,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 457088d1c2889b36850d00978a970867 2024-11-22T19:24:34,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:34,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:34,421 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:34,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303534391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:34,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303534392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:34,423 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:34,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303534421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:34,423 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:34,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303534421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:34,423 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:34,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303534421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:34,523 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:34,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303534522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:34,525 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:34,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303534524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:34,526 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:34,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303534524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:34,526 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:34,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303534524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:34,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:34,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303534726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:34,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:34,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303534727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:34,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:34,730 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:34,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303534727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:34,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:34,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303534728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:34,732 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411224ac68bf1cca541789659134665c3a400_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411224ac68bf1cca541789659134665c3a400_457088d1c2889b36850d00978a970867 2024-11-22T19:24:34,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/139a876358cf4fad85496d87ce7d5651, store: [table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:34,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/139a876358cf4fad85496d87ce7d5651 is 175, key is test_row_0/A:col10/1732303473274/Put/seqid=0 2024-11-22T19:24:34,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742462_1638 (size=30955) 2024-11-22T19:24:34,949 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried 
hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-22T19:24:35,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:35,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303535030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:35,032 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:35,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303535030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:35,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:35,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303535031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:35,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T19:24:35,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303535032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657
2024-11-22T19:24:35,138 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/139a876358cf4fad85496d87ce7d5651
2024-11-22T19:24:35,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/c5056491f20347c59665d3fa17c4b19c is 50, key is test_row_0/B:col10/1732303473274/Put/seqid=0
2024-11-22T19:24:35,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742463_1639 (size=12001)
2024-11-22T19:24:35,150 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/c5056491f20347c59665d3fa17c4b19c
2024-11-22T19:24:35,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/8ccfbd7be07641afb7c4d90ccf2eeafc is 50, key is test_row_0/C:col10/1732303473274/Put/seqid=0
2024-11-22T19:24:35,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742464_1640 (size=12001)
2024-11-22T19:24:35,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163
2024-11-22T19:24:35,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T19:24:35,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303535428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657
2024-11-22T19:24:35,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T19:24:35,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303535533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657
2024-11-22T19:24:35,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T19:24:35,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303535534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657
2024-11-22T19:24:35,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T19:24:35,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303535536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657
2024-11-22T19:24:35,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T19:24:35,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303535537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657
2024-11-22T19:24:35,578 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/8ccfbd7be07641afb7c4d90ccf2eeafc
2024-11-22T19:24:35,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/139a876358cf4fad85496d87ce7d5651 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/139a876358cf4fad85496d87ce7d5651
2024-11-22T19:24:35,585 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/139a876358cf4fad85496d87ce7d5651, entries=150, sequenceid=41, filesize=30.2 K
2024-11-22T19:24:35,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/c5056491f20347c59665d3fa17c4b19c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/c5056491f20347c59665d3fa17c4b19c
2024-11-22T19:24:35,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-22T19:24:35,589 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/c5056491f20347c59665d3fa17c4b19c, entries=150, sequenceid=41, filesize=11.7 K
2024-11-22T19:24:35,590 DEBUG
[RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/8ccfbd7be07641afb7c4d90ccf2eeafc as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/8ccfbd7be07641afb7c4d90ccf2eeafc 2024-11-22T19:24:35,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,593 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/8ccfbd7be07641afb7c4d90ccf2eeafc, entries=150, sequenceid=41, filesize=11.7 K 2024-11-22T19:24:35,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,594 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 457088d1c2889b36850d00978a970867 in 1276ms, sequenceid=41, compaction requested=false 2024-11-22T19:24:35,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:35,594 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:35,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-22T19:24:35,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-11-22T19:24:35,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,596 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-11-22T19:24:35,597 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3450 sec 2024-11-22T19:24:35,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,598 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 2.3530 sec 2024-11-22T19:24:35,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical DEBUG entries repeat continuously from 2024-11-22T19:24:35,657 through 2024-11-22T19:24:35,802, emitted by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 35917, each reading "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" ...]
2024-11-22T19:24:35,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:35,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical StoreFileTrackerFactory(122) DEBUG entries ("instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker") repeat continuously from 2024-11-22T19:24:36,128 through 19:24:36,274 on RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2, port 35917 ...]
2024-11-22T19:24:36,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG entry from storefiletracker.StoreFileTrackerFactory(122), "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker", repeats continuously from 2024-11-22T19:24:36,358 through 2024-11-22T19:24:36,522, emitted by RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 on port 35917; duplicate entries elided ...]
2024-11-22T19:24:36,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:36,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
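The flood of StoreFileTrackerFactory DEBUG entries above simply records each store resolving its tracker implementation to DefaultStoreFileTracker. For orientation only (this is not part of the test output), a minimal sketch of how that choice could be pinned is shown below; it assumes the "hbase.store.file-tracker.impl" key read by StoreFileTrackerFactory in recent HBase releases, the table name is taken from this log, and the connection/Admin plumbing around it is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class StoreFileTrackerConfigSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Cluster-wide default; "DEFAULT" maps to DefaultStoreFileTracker,
    // the class the factory instantiates over and over in the log above.
    conf.set("hbase.store.file-tracker.impl", "DEFAULT");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Per-table override: record the tracker choice in the table descriptor
      // (assumed mechanism; key name as above, value "DEFAULT" or "FILE").
      TableDescriptor current = admin.getDescriptor(TableName.valueOf("TestAcidGuarantees"));
      TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
          .setValue("hbase.store.file-tracker.impl", "DEFAULT")
          .build();
      admin.modifyTable(updated);
    }
  }
}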
2024-11-22T19:24:36,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 457088d1c2889b36850d00978a970867
2024-11-22T19:24:36,547 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 457088d1c2889b36850d00978a970867 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB
2024-11-22T19:24:36,547 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=A
2024-11-22T19:24:36,547 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T19:24:36,547 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=B
2024-11-22T19:24:36,547 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T19:24:36,547 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=C
2024-11-22T19:24:36,547 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T19:24:36,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122eed753f6eb23478f9869680c4d6bc8ea_457088d1c2889b36850d00978a970867 is 50, key is test_row_0/A:col10/1732303476546/Put/seqid=0
2024-11-22T19:24:36,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742465_1641 (size=14594)
2024-11-22T19:24:36,562 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-22T19:24:36,567 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122eed753f6eb23478f9869680c4d6bc8ea_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122eed753f6eb23478f9869680c4d6bc8ea_457088d1c2889b36850d00978a970867
2024-11-22T19:24:36,568 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/e390eb3dc56b4c2a9f7148c36aaf4b0c, store: [table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867]
2024-11-22T19:24:36,569 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/e390eb3dc56b4c2a9f7148c36aaf4b0c is 175, key is test_row_0/A:col10/1732303476546/Put/seqid=0
[... from 2024-11-22T19:24:36,545 through 2024-11-22T19:24:36,570 the same storefiletracker.StoreFileTrackerFactory(122) DEBUG entry ("instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker") keeps repeating on RpcServer.default.FPBQ.Fifo handlers 2 and 0 (queue=0, port=35917), interleaved with the entries listed above ...]
2024-11-22T19:24:36,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742466_1642 (size=39549)
2024-11-22T19:24:36,576 INFO [MemStoreFlusher.0 {}]
mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=56, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/e390eb3dc56b4c2a9f7148c36aaf4b0c 2024-11-22T19:24:36,580 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:36,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303536576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:36,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:36,581 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:36,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303536577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:36,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303536577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:36,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:36,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303536578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:36,585 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/1f855c725ace4e9c8928739e5d18a9e4 is 50, key is test_row_0/B:col10/1732303476546/Put/seqid=0 2024-11-22T19:24:36,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742467_1643 (size=12001) 2024-11-22T19:24:36,602 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/1f855c725ace4e9c8928739e5d18a9e4 2024-11-22T19:24:36,618 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/02f474b7e90143d3b044f3df477a2efd is 50, key is test_row_0/C:col10/1732303476546/Put/seqid=0 2024-11-22T19:24:36,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742468_1644 (size=12001) 2024-11-22T19:24:36,650 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/02f474b7e90143d3b044f3df477a2efd 2024-11-22T19:24:36,656 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/e390eb3dc56b4c2a9f7148c36aaf4b0c as 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/e390eb3dc56b4c2a9f7148c36aaf4b0c 2024-11-22T19:24:36,660 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/e390eb3dc56b4c2a9f7148c36aaf4b0c, entries=200, sequenceid=56, filesize=38.6 K 2024-11-22T19:24:36,661 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/1f855c725ace4e9c8928739e5d18a9e4 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/1f855c725ace4e9c8928739e5d18a9e4 2024-11-22T19:24:36,665 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/1f855c725ace4e9c8928739e5d18a9e4, entries=150, sequenceid=56, filesize=11.7 K 2024-11-22T19:24:36,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/02f474b7e90143d3b044f3df477a2efd as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/02f474b7e90143d3b044f3df477a2efd 2024-11-22T19:24:36,669 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/02f474b7e90143d3b044f3df477a2efd, entries=150, sequenceid=56, filesize=11.7 K 2024-11-22T19:24:36,669 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 457088d1c2889b36850d00978a970867 in 123ms, sequenceid=56, compaction requested=true 2024-11-22T19:24:36,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:36,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:24:36,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:36,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:24:36,670 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:36,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:36,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:24:36,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:36,670 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:36,671 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:36,671 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:36,672 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/A is initiating minor compaction (all files) 2024-11-22T19:24:36,672 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/B is initiating minor compaction (all files) 2024-11-22T19:24:36,672 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/A in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:36,672 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/B in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
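The two compaction threads above have just used the exploring selection to pick 3 eligible files for stores A and B. As a rough, self-contained illustration of the ratio test that selection relies on (this is not the actual HBase implementation), the sketch below checks a candidate set against a compaction ratio, using the approximate A-store file sizes reported in this log; 1.2 is the usual default for hbase.hstore.compaction.ratio.

import java.util.List;

public class RatioCheckSketch {
  // Returns true when no single file exceeds `ratio` times the combined size of the others,
  // which is roughly the condition a candidate set must satisfy to be considered.
  static boolean filesInRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximate byte sizes of the three A-store files selected above (30.2 K, 30.2 K, 38.6 K).
    List<Long> storeFilesA = List.of(30_900L, 30_900L, 39_549L);
    System.out.println(filesInRatio(storeFilesA, 1.2)); // prints true: the set is in ratio
  }
}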
2024-11-22T19:24:36,672 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/b8180b41d77b41d0949b2bb131963b3c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/139a876358cf4fad85496d87ce7d5651, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/e390eb3dc56b4c2a9f7148c36aaf4b0c] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=99.1 K 2024-11-22T19:24:36,672 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/6f9cf7bfefb3436695b5d52aa5f0c2e0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/c5056491f20347c59665d3fa17c4b19c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/1f855c725ace4e9c8928739e5d18a9e4] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=35.2 K 2024-11-22T19:24:36,672 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:36,672 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
files: [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/b8180b41d77b41d0949b2bb131963b3c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/139a876358cf4fad85496d87ce7d5651, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/e390eb3dc56b4c2a9f7148c36aaf4b0c] 2024-11-22T19:24:36,673 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f9cf7bfefb3436695b5d52aa5f0c2e0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732303473250 2024-11-22T19:24:36,673 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8180b41d77b41d0949b2bb131963b3c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732303473250 2024-11-22T19:24:36,673 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 139a876358cf4fad85496d87ce7d5651, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732303473270 2024-11-22T19:24:36,673 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting c5056491f20347c59665d3fa17c4b19c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732303473270 2024-11-22T19:24:36,674 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f855c725ace4e9c8928739e5d18a9e4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732303474396 2024-11-22T19:24:36,674 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e390eb3dc56b4c2a9f7148c36aaf4b0c, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732303474393 2024-11-22T19:24:36,679 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:36,680 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 457088d1c2889b36850d00978a970867#B#compaction#554 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:36,681 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/103b96bfbfa84183be247c3a8a5de674 is 50, key is test_row_0/B:col10/1732303476546/Put/seqid=0 2024-11-22T19:24:36,681 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411221585f66271e341cab4becd216fc0b63e_457088d1c2889b36850d00978a970867 store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:36,682 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411221585f66271e341cab4becd216fc0b63e_457088d1c2889b36850d00978a970867, store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:36,682 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411221585f66271e341cab4becd216fc0b63e_457088d1c2889b36850d00978a970867 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:36,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 457088d1c2889b36850d00978a970867 2024-11-22T19:24:36,685 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 457088d1c2889b36850d00978a970867 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-22T19:24:36,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=A 2024-11-22T19:24:36,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:36,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=B 2024-11-22T19:24:36,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:36,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=C 2024-11-22T19:24:36,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:36,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742470_1646 (size=4469) 2024-11-22T19:24:36,688 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 457088d1c2889b36850d00978a970867#A#compaction#555 average throughput is 2.71 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:36,689 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/a8b5475a989f4207baa694b096958f3c is 175, key is test_row_0/A:col10/1732303476546/Put/seqid=0 2024-11-22T19:24:36,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742469_1645 (size=12104) 2024-11-22T19:24:36,694 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122ad952cb6154a4620b16aad7800f1cbb1_457088d1c2889b36850d00978a970867 is 50, key is test_row_0/A:col10/1732303476685/Put/seqid=0 2024-11-22T19:24:36,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:36,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303536694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:36,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:36,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303536695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:36,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:36,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303536696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:36,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:36,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303536696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:36,699 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/103b96bfbfa84183be247c3a8a5de674 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/103b96bfbfa84183be247c3a8a5de674 2024-11-22T19:24:36,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742471_1647 (size=31058) 2024-11-22T19:24:36,705 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 457088d1c2889b36850d00978a970867/B of 457088d1c2889b36850d00978a970867 into 103b96bfbfa84183be247c3a8a5de674(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
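The repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting puts once the region's memstore passes its blocking limit, which is roughly the memstore flush size multiplied by the block multiplier; the 512.0 K figure implies this test runs with a far smaller flush size than the 128 MB default. Below is a minimal sketch of how those two standard properties combine. The property keys are real HBase configuration names; the values are assumptions chosen only so the product matches the 512 KB limit reported in the log, not values read from this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: the blocking limit behind "Over memstore limit=512.0 K" is roughly
// flush size * block multiplier. Values below are assumed for illustration.
public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // 128 KB (assumed; default is 128 MB)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // default multiplier

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // 128 KB * 4 = 512 KB: past this point puts fail fast with RegionTooBusyException
    // until the in-flight flush drains the memstore.
    System.out.println("blocking memstore limit ~= " + (flushSize * multiplier) + " bytes");
  }
}
```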
2024-11-22T19:24:36,705 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:36,705 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/B, priority=13, startTime=1732303476670; duration=0sec 2024-11-22T19:24:36,705 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:36,705 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:B 2024-11-22T19:24:36,705 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:36,706 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:36,706 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/C is initiating minor compaction (all files) 2024-11-22T19:24:36,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742472_1648 (size=17034) 2024-11-22T19:24:36,706 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/C in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
2024-11-22T19:24:36,707 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/99449147689d4c738948171c4276c064, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/8ccfbd7be07641afb7c4d90ccf2eeafc, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/02f474b7e90143d3b044f3df477a2efd] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=35.2 K 2024-11-22T19:24:36,707 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 99449147689d4c738948171c4276c064, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732303473250 2024-11-22T19:24:36,707 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ccfbd7be07641afb7c4d90ccf2eeafc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732303473270 2024-11-22T19:24:36,707 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 02f474b7e90143d3b044f3df477a2efd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732303474396 2024-11-22T19:24:36,713 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 457088d1c2889b36850d00978a970867#C#compaction#557 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:36,714 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/491689f4e07141c7b9d9b4b24fc388d2 is 50, key is test_row_0/C:col10/1732303476546/Put/seqid=0 2024-11-22T19:24:36,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742473_1649 (size=12104) 2024-11-22T19:24:36,722 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/491689f4e07141c7b9d9b4b24fc388d2 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/491689f4e07141c7b9d9b4b24fc388d2 2024-11-22T19:24:36,726 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 457088d1c2889b36850d00978a970867/C of 457088d1c2889b36850d00978a970867 into 491689f4e07141c7b9d9b4b24fc388d2(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
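The compaction lines above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" and the ExploringCompactionPolicy choosing all 3 files) are driven by the standard store-file thresholds. The sketch below names those knobs; the keys are real HBase properties and the values shown are simply the shipped defaults, which happen to match the counts in the log, not values read from this test's configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Knobs behind "3 store files ... 3 eligible, 16 blocking". Values are the usual defaults.
public class CompactionSelectionSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);       // minimum files before a minor compaction is selected
    conf.setInt("hbase.hstore.compaction.max", 10);      // maximum files per compaction
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);  // flushes are blocked once a store has this many files

    System.out.println("min=" + conf.getInt("hbase.hstore.compaction.min", 3)
        + " max=" + conf.getInt("hbase.hstore.compaction.max", 10)
        + " blocking=" + conf.getInt("hbase.hstore.blockingStoreFiles", 16));
  }
}
```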
2024-11-22T19:24:36,726 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:36,726 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/C, priority=13, startTime=1732303476670; duration=0sec 2024-11-22T19:24:36,726 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:36,726 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:C 2024-11-22T19:24:36,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:36,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303536799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:36,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:36,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303536799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:36,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:36,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303536799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:36,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:36,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303536800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:37,003 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:37,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:37,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303537001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:37,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303537001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:37,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:37,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303537002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:37,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:37,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303537002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:37,107 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/a8b5475a989f4207baa694b096958f3c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/a8b5475a989f4207baa694b096958f3c 2024-11-22T19:24:37,107 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:37,110 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122ad952cb6154a4620b16aad7800f1cbb1_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122ad952cb6154a4620b16aad7800f1cbb1_457088d1c2889b36850d00978a970867 2024-11-22T19:24:37,111 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 457088d1c2889b36850d00978a970867/A of 457088d1c2889b36850d00978a970867 into a8b5475a989f4207baa694b096958f3c(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
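The mobdir paths and the DefaultMobStoreFlusher/DefaultMobStoreCompactor and HMobStore "FLUSH Renaming" entries above indicate that family A is MOB-enabled: cells above the MOB threshold are written to separate MOB files that are renamed into the mob data directory, as logged. A minimal sketch of how such a family is declared follows; the 4-byte threshold is an illustrative assumption, not the value this test actually uses.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: a MOB-enabled family like the 'A' family whose MOB files appear in the mobdir paths above.
public class MobFamilySketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)   // values above the threshold are stored as MOB files
            .setMobThreshold(4L)   // bytes; assumed value for illustration only
            .build())
        .build();
  }
}
```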
2024-11-22T19:24:37,111 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:37,111 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/A, priority=13, startTime=1732303476670; duration=0sec 2024-11-22T19:24:37,111 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/b8820c493d324c0d8d30468cf4f68cd4, store: [table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:37,111 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:37,111 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:A 2024-11-22T19:24:37,112 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/b8820c493d324c0d8d30468cf4f68cd4 is 175, key is test_row_0/A:col10/1732303476685/Put/seqid=0 2024-11-22T19:24:37,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742474_1650 (size=48139) 2024-11-22T19:24:37,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:37,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303537304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:37,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:37,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303537305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:37,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:37,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303537305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:37,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:37,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303537306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:37,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-22T19:24:37,353 INFO [Thread-2776 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-11-22T19:24:37,354 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:24:37,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-11-22T19:24:37,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-22T19:24:37,355 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:24:37,356 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:24:37,356 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:24:37,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:37,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303537440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:37,442 DEBUG [Thread-2772 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4168 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., hostname=a307a1377457,35917,1732303314657, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) 
at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:24:37,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-22T19:24:37,507 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:37,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-22T19:24:37,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 
{event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:37,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:37,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:37,508 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:37,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
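The pid=165/166 entries above are a master-driven flush: a client Admin.flush call is stored as a FlushTableProcedure, whose per-region FlushRegionCallable fails here with "Unable to complete flush" because the region is already flushing, and the master re-dispatches it until the in-flight flush completes (the earlier HBaseAdmin$TableFuture line shows the client waiting for procId 163 to finish). A hedged sketch of the client side of that round trip, with connection setup assumed:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch: the client call that results in a FlushTableProcedure like pid=165 above.
// The call returns once the master-side procedure completes, even if individual
// FlushRegionCallable attempts are retried as logged here.
public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```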
2024-11-22T19:24:37,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:24:37,515 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=82, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/b8820c493d324c0d8d30468cf4f68cd4 2024-11-22T19:24:37,521 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/a3624074c1504e318d92c1d075bfe8b3 is 50, key is test_row_0/B:col10/1732303476685/Put/seqid=0 2024-11-22T19:24:37,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742475_1651 (size=12001) 2024-11-22T19:24:37,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-22T19:24:37,660 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:37,660 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-22T19:24:37,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:37,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:37,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:37,661 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:24:37,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:37,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:37,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:37,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303537806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:37,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:37,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303537807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:37,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:37,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303537807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:37,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:37,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303537808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:37,812 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:37,813 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-22T19:24:37,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:37,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:37,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:37,813 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:37,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:37,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:37,925 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/a3624074c1504e318d92c1d075bfe8b3 2024-11-22T19:24:37,932 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/2a5c1f2059a54e5fbccbf2a20c0760bc is 50, key is test_row_0/C:col10/1732303476685/Put/seqid=0 2024-11-22T19:24:37,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742476_1652 (size=12001) 2024-11-22T19:24:37,936 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/2a5c1f2059a54e5fbccbf2a20c0760bc 2024-11-22T19:24:37,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/b8820c493d324c0d8d30468cf4f68cd4 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/b8820c493d324c0d8d30468cf4f68cd4 2024-11-22T19:24:37,943 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/b8820c493d324c0d8d30468cf4f68cd4, entries=250, sequenceid=82, filesize=47.0 K 2024-11-22T19:24:37,943 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/a3624074c1504e318d92c1d075bfe8b3 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/a3624074c1504e318d92c1d075bfe8b3 2024-11-22T19:24:37,946 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/a3624074c1504e318d92c1d075bfe8b3, entries=150, sequenceid=82, filesize=11.7 K 
2024-11-22T19:24:37,947 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/2a5c1f2059a54e5fbccbf2a20c0760bc as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/2a5c1f2059a54e5fbccbf2a20c0760bc 2024-11-22T19:24:37,950 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/2a5c1f2059a54e5fbccbf2a20c0760bc, entries=150, sequenceid=82, filesize=11.7 K 2024-11-22T19:24:37,951 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 457088d1c2889b36850d00978a970867 in 1265ms, sequenceid=82, compaction requested=false 2024-11-22T19:24:37,951 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:37,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-22T19:24:37,964 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:37,964 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-22T19:24:37,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
2024-11-22T19:24:37,964 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing 457088d1c2889b36850d00978a970867 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T19:24:37,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=A 2024-11-22T19:24:37,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:37,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=B 2024-11-22T19:24:37,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:37,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=C 2024-11-22T19:24:37,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:37,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411226a3656e7705e4509ab01f58eea5bd38e_457088d1c2889b36850d00978a970867 is 50, key is test_row_0/A:col10/1732303476689/Put/seqid=0 2024-11-22T19:24:37,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742477_1653 (size=12154) 2024-11-22T19:24:37,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:37,979 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411226a3656e7705e4509ab01f58eea5bd38e_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411226a3656e7705e4509ab01f58eea5bd38e_457088d1c2889b36850d00978a970867 2024-11-22T19:24:37,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/ef2603f59124464c882e074445a1388a, store: [table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:37,980 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/ef2603f59124464c882e074445a1388a is 175, key is test_row_0/A:col10/1732303476689/Put/seqid=0 2024-11-22T19:24:37,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742478_1654 (size=30955) 2024-11-22T19:24:38,384 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=96, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/ef2603f59124464c882e074445a1388a 2024-11-22T19:24:38,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/7461e2b7cd1a46e3a786d2f9afeed61e is 50, key is test_row_0/B:col10/1732303476689/Put/seqid=0 2024-11-22T19:24:38,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742479_1655 (size=12001) 2024-11-22T19:24:38,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-22T19:24:38,795 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/7461e2b7cd1a46e3a786d2f9afeed61e 2024-11-22T19:24:38,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/effde2e30a6045e4a0371768d7ad799c is 50, key is test_row_0/C:col10/1732303476689/Put/seqid=0 2024-11-22T19:24:38,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742480_1656 (size=12001) 2024-11-22T19:24:38,811 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:38,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 457088d1c2889b36850d00978a970867 2024-11-22T19:24:38,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:38,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303538833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:38,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:38,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303538834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:38,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:38,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303538835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:38,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:38,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303538836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:38,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:38,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303538937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:38,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:38,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303538937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:38,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:38,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303538938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:38,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:38,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303538940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:39,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:39,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303539139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:39,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:39,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303539140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:39,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:39,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303539141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:39,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:39,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303539142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:39,205 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/effde2e30a6045e4a0371768d7ad799c 2024-11-22T19:24:39,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/ef2603f59124464c882e074445a1388a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/ef2603f59124464c882e074445a1388a 2024-11-22T19:24:39,212 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/ef2603f59124464c882e074445a1388a, entries=150, sequenceid=96, filesize=30.2 K 2024-11-22T19:24:39,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/7461e2b7cd1a46e3a786d2f9afeed61e as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/7461e2b7cd1a46e3a786d2f9afeed61e 2024-11-22T19:24:39,216 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/7461e2b7cd1a46e3a786d2f9afeed61e, entries=150, sequenceid=96, filesize=11.7 K 2024-11-22T19:24:39,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/effde2e30a6045e4a0371768d7ad799c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/effde2e30a6045e4a0371768d7ad799c 2024-11-22T19:24:39,219 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/effde2e30a6045e4a0371768d7ad799c, entries=150, sequenceid=96, filesize=11.7 K 2024-11-22T19:24:39,220 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 457088d1c2889b36850d00978a970867 in 1256ms, sequenceid=96, compaction requested=true 2024-11-22T19:24:39,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:39,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:39,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-22T19:24:39,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-11-22T19:24:39,222 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-11-22T19:24:39,222 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8650 sec 2024-11-22T19:24:39,223 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 1.8680 sec 2024-11-22T19:24:39,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 457088d1c2889b36850d00978a970867 2024-11-22T19:24:39,446 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 457088d1c2889b36850d00978a970867 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-22T19:24:39,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=A 2024-11-22T19:24:39,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:39,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=B 2024-11-22T19:24:39,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:39,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
457088d1c2889b36850d00978a970867, store=C 2024-11-22T19:24:39,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:39,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:39,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303539450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:39,454 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122336fd31563bc4d13ad46545da3c186df_457088d1c2889b36850d00978a970867 is 50, key is test_row_0/A:col10/1732303478834/Put/seqid=0 2024-11-22T19:24:39,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:39,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303539451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:39,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:39,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303539452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:39,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:39,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303539453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:39,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742481_1657 (size=17034) 2024-11-22T19:24:39,457 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:39,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-22T19:24:39,459 INFO [Thread-2776 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-11-22T19:24:39,460 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:24:39,461 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122336fd31563bc4d13ad46545da3c186df_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122336fd31563bc4d13ad46545da3c186df_457088d1c2889b36850d00978a970867 2024-11-22T19:24:39,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-11-22T19:24:39,461 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/c3f7234f6c9f408789191ceb4d7fe44c, store: [table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:39,462 INFO [PEWorker-4 {}] 
procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:24:39,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-22T19:24:39,462 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/c3f7234f6c9f408789191ceb4d7fe44c is 175, key is test_row_0/A:col10/1732303478834/Put/seqid=0 2024-11-22T19:24:39,462 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:24:39,462 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:24:39,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742482_1658 (size=48139) 2024-11-22T19:24:39,466 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=123, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/c3f7234f6c9f408789191ceb4d7fe44c 2024-11-22T19:24:39,472 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/3eea0274293d4d69b73551ca1713049f is 50, key is test_row_0/B:col10/1732303478834/Put/seqid=0 2024-11-22T19:24:39,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742483_1659 (size=12001) 2024-11-22T19:24:39,475 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/3eea0274293d4d69b73551ca1713049f 2024-11-22T19:24:39,481 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/dfcb1b9699dd4deb9af8d06464fe56ef is 50, key is test_row_0/C:col10/1732303478834/Put/seqid=0 2024-11-22T19:24:39,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742484_1660 (size=12001) 2024-11-22T19:24:39,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:39,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303539554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:39,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:39,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303539555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:39,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:39,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303539555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:39,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:39,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303539556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:39,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-22T19:24:39,614 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:39,614 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-22T19:24:39,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:39,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:39,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:39,614 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:39,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:39,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:39,758 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:39,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303539757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:39,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:39,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303539757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:39,759 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:39,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303539757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:39,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:39,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303539759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:39,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-22T19:24:39,766 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:39,767 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-22T19:24:39,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:39,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:39,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:39,767 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:39,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:39,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:39,885 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/dfcb1b9699dd4deb9af8d06464fe56ef 2024-11-22T19:24:39,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/c3f7234f6c9f408789191ceb4d7fe44c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/c3f7234f6c9f408789191ceb4d7fe44c 2024-11-22T19:24:39,891 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/c3f7234f6c9f408789191ceb4d7fe44c, entries=250, sequenceid=123, filesize=47.0 K 2024-11-22T19:24:39,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/3eea0274293d4d69b73551ca1713049f as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/3eea0274293d4d69b73551ca1713049f 2024-11-22T19:24:39,895 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/3eea0274293d4d69b73551ca1713049f, entries=150, sequenceid=123, filesize=11.7 K 2024-11-22T19:24:39,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/dfcb1b9699dd4deb9af8d06464fe56ef as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/dfcb1b9699dd4deb9af8d06464fe56ef 2024-11-22T19:24:39,898 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/dfcb1b9699dd4deb9af8d06464fe56ef, entries=150, sequenceid=123, filesize=11.7 K 2024-11-22T19:24:39,899 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 457088d1c2889b36850d00978a970867 in 453ms, sequenceid=123, compaction requested=true 2024-11-22T19:24:39,899 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:39,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:24:39,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:39,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:24:39,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:39,899 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:24:39,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:24:39,899 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:24:39,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:39,900 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:24:39,900 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 158291 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:24:39,900 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/B is initiating minor compaction (all files) 2024-11-22T19:24:39,900 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/A is initiating minor compaction (all files) 2024-11-22T19:24:39,900 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/B in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:39,901 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/A in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
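The records above show the explicit table flush requested by the test client (HBaseAdmin "Operation: FLUSH" on default:TestAcidGuarantees, FlushTableProcedure pid=165/167) completing, after which the region server queues the flushed store files for minor compaction. What follows is a minimal, hedged sketch of how such a flush (and an optional compaction request) can be issued through the public Admin API; it assumes a reachable cluster whose hbase-site.xml is on the classpath, and the class name is illustrative rather than part of the test code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Ask the master to flush the table's memstores; a request like this is what
            // starts the FlushTableProcedure/FlushRegionProcedure pair seen in the log.
            admin.flush(table);
            // Optionally ask for a (minor) compaction of the resulting store files.
            admin.compact(table);
        }
    }
}

The snippet only mirrors the client call path visible in the log; the compactions recorded above are actually requested automatically after the flush (MemStoreFlusher handing stores to CompactSplit), not by an explicit admin.compact() call.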
2024-11-22T19:24:39,901 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/103b96bfbfa84183be247c3a8a5de674, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/a3624074c1504e318d92c1d075bfe8b3, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/7461e2b7cd1a46e3a786d2f9afeed61e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/3eea0274293d4d69b73551ca1713049f] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=47.0 K 2024-11-22T19:24:39,901 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/a8b5475a989f4207baa694b096958f3c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/b8820c493d324c0d8d30468cf4f68cd4, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/ef2603f59124464c882e074445a1388a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/c3f7234f6c9f408789191ceb4d7fe44c] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=154.6 K 2024-11-22T19:24:39,901 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:39,901 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
files: [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/a8b5475a989f4207baa694b096958f3c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/b8820c493d324c0d8d30468cf4f68cd4, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/ef2603f59124464c882e074445a1388a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/c3f7234f6c9f408789191ceb4d7fe44c] 2024-11-22T19:24:39,901 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 103b96bfbfa84183be247c3a8a5de674, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732303474396 2024-11-22T19:24:39,901 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting a3624074c1504e318d92c1d075bfe8b3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732303476683 2024-11-22T19:24:39,901 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting a8b5475a989f4207baa694b096958f3c, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732303474396 2024-11-22T19:24:39,901 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 7461e2b7cd1a46e3a786d2f9afeed61e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732303476689 2024-11-22T19:24:39,901 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8820c493d324c0d8d30468cf4f68cd4, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732303476574 2024-11-22T19:24:39,902 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 3eea0274293d4d69b73551ca1713049f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732303478829 2024-11-22T19:24:39,902 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef2603f59124464c882e074445a1388a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732303476689 2024-11-22T19:24:39,902 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting c3f7234f6c9f408789191ceb4d7fe44c, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732303478829 2024-11-22T19:24:39,908 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 457088d1c2889b36850d00978a970867#B#compaction#566 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:39,909 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/5ff71c9465d14526aeb1021cf5204f59 is 50, key is test_row_0/B:col10/1732303478834/Put/seqid=0 2024-11-22T19:24:39,910 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:39,912 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122632eff1509a146478d31f5144ac3ee1f_457088d1c2889b36850d00978a970867 store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:39,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742485_1661 (size=12241) 2024-11-22T19:24:39,915 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122632eff1509a146478d31f5144ac3ee1f_457088d1c2889b36850d00978a970867, store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:39,915 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122632eff1509a146478d31f5144ac3ee1f_457088d1c2889b36850d00978a970867 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:39,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742486_1662 (size=4469) 2024-11-22T19:24:39,919 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:39,919 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-22T19:24:39,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
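Throughout this part of the log the RPC handlers repeatedly reject writes with RegionTooBusyException ("Over memstore limit=512.0 K") while flushes for the same region are in flight: a region blocks updates once its memstore exceeds the blocking limit (flush size times the block multiplier; the 512 K figure evidently reflects this test's small memstore settings), and writers are expected to back off until the flush catches up. Below is a hedged, illustrative sketch of a client-side write with an explicit back-off on that condition; the table/row/column literals and the retry policy are assumptions for illustration only, and the HBase client already performs its own internal retries for this exception.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnBusyRegion {

    // Walk the cause chain, since the client may surface the server-side
    // RegionTooBusyException wrapped in a retries-exhausted IOException.
    private static boolean causedByBusyRegion(Throwable t) {
        for (Throwable c = t; c != null; c = c.getCause()) {
            if (c instanceof RegionTooBusyException) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) throws Exception {
        // The blocking threshold that produces "Over memstore limit=..." is a server-side
        // setting: hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier.
        // It must be configured on the region servers (or per table); it is noted here only
        // for reference and is not changed by this client.
        Configuration conf = HBaseConfiguration.create();

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);   // the client retries internally; this outer loop is belt-and-braces
                    break;
                } catch (IOException e) {
                    if (!causedByBusyRegion(e) || attempt >= 5) {
                        throw e;                   // not a busy region, or backed off long enough
                    }
                    Thread.sleep(200L * attempt);  // crude linear backoff while the flush completes
                }
            }
        }
    }
}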
2024-11-22T19:24:39,919 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing 457088d1c2889b36850d00978a970867 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-22T19:24:39,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=A 2024-11-22T19:24:39,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:39,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=B 2024-11-22T19:24:39,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:39,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=C 2024-11-22T19:24:39,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:39,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112228b010a5a81043c78e48032717b72343_457088d1c2889b36850d00978a970867 is 50, key is test_row_0/A:col10/1732303479452/Put/seqid=0 2024-11-22T19:24:39,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742487_1663 (size=12304) 2024-11-22T19:24:39,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:39,930 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112228b010a5a81043c78e48032717b72343_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112228b010a5a81043c78e48032717b72343_457088d1c2889b36850d00978a970867 2024-11-22T19:24:39,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/b00295514887438fa396758c7496d90f, store: [table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:39,931 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/b00295514887438fa396758c7496d90f is 175, key is test_row_0/A:col10/1732303479452/Put/seqid=0 2024-11-22T19:24:39,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742488_1664 (size=31105) 2024-11-22T19:24:39,934 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=132, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/b00295514887438fa396758c7496d90f 2024-11-22T19:24:39,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/fbb2eb9e28a945a6b885bdcc6634e2be is 50, key is test_row_0/B:col10/1732303479452/Put/seqid=0 2024-11-22T19:24:39,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742489_1665 (size=12151) 2024-11-22T19:24:39,943 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/fbb2eb9e28a945a6b885bdcc6634e2be 2024-11-22T19:24:39,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/ca6873a9826a40448ad8e16ac3e80557 is 50, key is test_row_0/C:col10/1732303479452/Put/seqid=0 2024-11-22T19:24:39,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742490_1666 (size=12151) 2024-11-22T19:24:40,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 457088d1c2889b36850d00978a970867 2024-11-22T19:24:40,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:40,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-22T19:24:40,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:40,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303540081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:40,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:40,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303540082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:40,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:40,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303540083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:40,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:40,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303540084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:40,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:40,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303540186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:40,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:40,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303540186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:40,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:40,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303540186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:40,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:40,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303540188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:40,317 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/5ff71c9465d14526aeb1021cf5204f59 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/5ff71c9465d14526aeb1021cf5204f59 2024-11-22T19:24:40,319 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 457088d1c2889b36850d00978a970867#A#compaction#567 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:40,320 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/d08714a47e9348728596084aaf096328 is 175, key is test_row_0/A:col10/1732303478834/Put/seqid=0 2024-11-22T19:24:40,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742491_1667 (size=31195) 2024-11-22T19:24:40,324 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 457088d1c2889b36850d00978a970867/B of 457088d1c2889b36850d00978a970867 into 5ff71c9465d14526aeb1021cf5204f59(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
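The "Over memstore limit=512.0 K" in the RegionTooBusyException warnings above is the region's blocking memstore size: writes are rejected until the in-flight flush shrinks the memstore back under that bound. HBase derives the bound from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The sketch below shows that computation; the fallback values match HBase's documented defaults, and the 512 K figure in this log implies the test configuration lowers them substantially.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Fallbacks are the stock defaults (128 MB flush size, block multiplier 4); the 512 K
    // limit seen in this log means the test runs with much smaller values.
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    // Once a region's memstore exceeds flushSize * multiplier, puts fail with
    // RegionTooBusyException ("Over memstore limit=...") until a flush completes.
    long blockingLimit = flushSize * multiplier;
    System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
  }
}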
2024-11-22T19:24:40,324 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:40,324 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/B, priority=12, startTime=1732303479899; duration=0sec 2024-11-22T19:24:40,324 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:40,324 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:B 2024-11-22T19:24:40,324 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:24:40,325 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:24:40,325 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/C is initiating minor compaction (all files) 2024-11-22T19:24:40,325 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/C in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:40,325 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/491689f4e07141c7b9d9b4b24fc388d2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/2a5c1f2059a54e5fbccbf2a20c0760bc, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/effde2e30a6045e4a0371768d7ad799c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/dfcb1b9699dd4deb9af8d06464fe56ef] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=47.0 K 2024-11-22T19:24:40,326 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 491689f4e07141c7b9d9b4b24fc388d2, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732303474396 2024-11-22T19:24:40,326 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a5c1f2059a54e5fbccbf2a20c0760bc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732303476683 2024-11-22T19:24:40,326 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting effde2e30a6045e4a0371768d7ad799c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=96, earliestPutTs=1732303476689 2024-11-22T19:24:40,327 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting dfcb1b9699dd4deb9af8d06464fe56ef, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732303478829 2024-11-22T19:24:40,332 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 457088d1c2889b36850d00978a970867#C#compaction#571 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:40,333 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/948338b6a64a4fa0ae78dfc345006473 is 50, key is test_row_0/C:col10/1732303478834/Put/seqid=0 2024-11-22T19:24:40,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742492_1668 (size=12241) 2024-11-22T19:24:40,350 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/ca6873a9826a40448ad8e16ac3e80557 2024-11-22T19:24:40,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/b00295514887438fa396758c7496d90f as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/b00295514887438fa396758c7496d90f 2024-11-22T19:24:40,356 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/b00295514887438fa396758c7496d90f, entries=150, sequenceid=132, filesize=30.4 K 2024-11-22T19:24:40,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/fbb2eb9e28a945a6b885bdcc6634e2be as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/fbb2eb9e28a945a6b885bdcc6634e2be 2024-11-22T19:24:40,360 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/fbb2eb9e28a945a6b885bdcc6634e2be, entries=150, sequenceid=132, filesize=11.9 K 2024-11-22T19:24:40,361 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/ca6873a9826a40448ad8e16ac3e80557 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/ca6873a9826a40448ad8e16ac3e80557 2024-11-22T19:24:40,364 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/ca6873a9826a40448ad8e16ac3e80557, entries=150, sequenceid=132, filesize=11.9 K 2024-11-22T19:24:40,364 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 457088d1c2889b36850d00978a970867 in 445ms, sequenceid=132, compaction requested=false 2024-11-22T19:24:40,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:40,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:40,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-11-22T19:24:40,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-11-22T19:24:40,366 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-22T19:24:40,366 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 903 msec 2024-11-22T19:24:40,368 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 907 msec 2024-11-22T19:24:40,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 457088d1c2889b36850d00978a970867 2024-11-22T19:24:40,390 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 457088d1c2889b36850d00978a970867 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-22T19:24:40,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=A 2024-11-22T19:24:40,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:40,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=B 2024-11-22T19:24:40,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, 
new segment=null 2024-11-22T19:24:40,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=C 2024-11-22T19:24:40,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:40,396 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122c638da91324146a8b2973f95a5887eec_457088d1c2889b36850d00978a970867 is 50, key is test_row_0/A:col10/1732303480083/Put/seqid=0 2024-11-22T19:24:40,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:40,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303540394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:40,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:40,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303540396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:40,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:40,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303540397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:40,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:40,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303540398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:40,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742493_1669 (size=12304) 2024-11-22T19:24:40,400 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:40,403 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122c638da91324146a8b2973f95a5887eec_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122c638da91324146a8b2973f95a5887eec_457088d1c2889b36850d00978a970867 2024-11-22T19:24:40,404 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/8e8b981157be4a6980939cbe4bb38eed, store: [table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:40,404 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/8e8b981157be4a6980939cbe4bb38eed is 175, key is test_row_0/A:col10/1732303480083/Put/seqid=0 2024-11-22T19:24:40,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742494_1670 (size=31105) 2024-11-22T19:24:40,410 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=161, memsize=55.9 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/8e8b981157be4a6980939cbe4bb38eed 2024-11-22T19:24:40,415 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/f8b23b87104f43d2ba1c17444f3efccb is 50, key is test_row_0/B:col10/1732303480083/Put/seqid=0 2024-11-22T19:24:40,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742495_1671 (size=12151) 2024-11-22T19:24:40,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:40,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303540500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:40,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:40,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303540501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:40,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:40,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303540501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:40,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:40,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303540501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:40,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-22T19:24:40,564 INFO [Thread-2776 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-11-22T19:24:40,566 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:24:40,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees 2024-11-22T19:24:40,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-22T19:24:40,567 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:24:40,567 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:24:40,568 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:24:40,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-22T19:24:40,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:40,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303540703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:40,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:40,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303540703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:40,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:40,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303540704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:40,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:40,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303540704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:40,719 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:40,719 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-22T19:24:40,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:40,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:40,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:40,720 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:40,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:40,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:40,728 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/d08714a47e9348728596084aaf096328 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/d08714a47e9348728596084aaf096328 2024-11-22T19:24:40,731 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 457088d1c2889b36850d00978a970867/A of 457088d1c2889b36850d00978a970867 into d08714a47e9348728596084aaf096328(size=30.5 K), total size for store is 60.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:40,732 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:40,732 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/A, priority=12, startTime=1732303479899; duration=0sec 2024-11-22T19:24:40,732 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:40,732 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:A 2024-11-22T19:24:40,739 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/948338b6a64a4fa0ae78dfc345006473 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/948338b6a64a4fa0ae78dfc345006473 2024-11-22T19:24:40,742 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 457088d1c2889b36850d00978a970867/C of 457088d1c2889b36850d00978a970867 into 948338b6a64a4fa0ae78dfc345006473(size=12.0 K), total size for store is 23.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:24:40,742 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:40,742 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/C, priority=12, startTime=1732303479899; duration=0sec 2024-11-22T19:24:40,743 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:40,743 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:C 2024-11-22T19:24:40,818 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/f8b23b87104f43d2ba1c17444f3efccb 2024-11-22T19:24:40,825 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/f338defc9282438d8e719fdd90ccc9f7 is 50, key is test_row_0/C:col10/1732303480083/Put/seqid=0 2024-11-22T19:24:40,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742496_1672 (size=12151) 2024-11-22T19:24:40,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-22T19:24:40,871 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:40,872 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-22T19:24:40,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:40,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:40,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
2024-11-22T19:24:40,872 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:40,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:40,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:41,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:41,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303541006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:41,008 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:41,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303541006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:41,008 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:41,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303541006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:41,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:41,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303541008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:41,024 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:41,024 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-22T19:24:41,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:41,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:41,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:41,025 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:24:41,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:41,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:41,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-22T19:24:41,177 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:41,177 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-22T19:24:41,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:41,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:41,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:41,177 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:41,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:41,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:24:41,229 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/f338defc9282438d8e719fdd90ccc9f7 2024-11-22T19:24:41,232 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/8e8b981157be4a6980939cbe4bb38eed as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/8e8b981157be4a6980939cbe4bb38eed 2024-11-22T19:24:41,236 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/8e8b981157be4a6980939cbe4bb38eed, entries=150, sequenceid=161, filesize=30.4 K 2024-11-22T19:24:41,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/f8b23b87104f43d2ba1c17444f3efccb as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/f8b23b87104f43d2ba1c17444f3efccb 2024-11-22T19:24:41,239 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/f8b23b87104f43d2ba1c17444f3efccb, entries=150, sequenceid=161, filesize=11.9 K 2024-11-22T19:24:41,240 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/f338defc9282438d8e719fdd90ccc9f7 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/f338defc9282438d8e719fdd90ccc9f7 2024-11-22T19:24:41,243 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/f338defc9282438d8e719fdd90ccc9f7, entries=150, sequenceid=161, filesize=11.9 K 2024-11-22T19:24:41,243 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 457088d1c2889b36850d00978a970867 in 853ms, sequenceid=161, compaction requested=true 2024-11-22T19:24:41,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:41,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:24:41,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:41,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:24:41,244 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:41,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:41,244 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:41,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:24:41,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:41,245 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36543 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:41,245 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93405 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:41,245 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/B is initiating minor compaction (all files) 2024-11-22T19:24:41,245 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/A is initiating minor compaction (all files) 2024-11-22T19:24:41,245 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/B in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:41,245 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/A in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
2024-11-22T19:24:41,245 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/d08714a47e9348728596084aaf096328, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/b00295514887438fa396758c7496d90f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/8e8b981157be4a6980939cbe4bb38eed] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=91.2 K 2024-11-22T19:24:41,245 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/5ff71c9465d14526aeb1021cf5204f59, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/fbb2eb9e28a945a6b885bdcc6634e2be, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/f8b23b87104f43d2ba1c17444f3efccb] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=35.7 K 2024-11-22T19:24:41,245 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:41,245 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
files: [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/d08714a47e9348728596084aaf096328, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/b00295514887438fa396758c7496d90f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/8e8b981157be4a6980939cbe4bb38eed] 2024-11-22T19:24:41,245 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ff71c9465d14526aeb1021cf5204f59, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732303478829 2024-11-22T19:24:41,245 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d08714a47e9348728596084aaf096328, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732303478829 2024-11-22T19:24:41,245 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b00295514887438fa396758c7496d90f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732303479450 2024-11-22T19:24:41,245 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting fbb2eb9e28a945a6b885bdcc6634e2be, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732303479450 2024-11-22T19:24:41,246 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8e8b981157be4a6980939cbe4bb38eed, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732303480081 2024-11-22T19:24:41,246 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting f8b23b87104f43d2ba1c17444f3efccb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732303480081 2024-11-22T19:24:41,250 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:41,252 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 457088d1c2889b36850d00978a970867#B#compaction#576 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:41,252 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112217db1b8f81b043b2ae3f26f972974638_457088d1c2889b36850d00978a970867 store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:41,253 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/42262c42b2d74291ba4f4a139d19dc68 is 50, key is test_row_0/B:col10/1732303480083/Put/seqid=0 2024-11-22T19:24:41,253 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112217db1b8f81b043b2ae3f26f972974638_457088d1c2889b36850d00978a970867, store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:41,254 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112217db1b8f81b043b2ae3f26f972974638_457088d1c2889b36850d00978a970867 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:41,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742497_1673 (size=12493) 2024-11-22T19:24:41,272 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/42262c42b2d74291ba4f4a139d19dc68 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/42262c42b2d74291ba4f4a139d19dc68 2024-11-22T19:24:41,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742498_1674 (size=4469) 2024-11-22T19:24:41,274 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 457088d1c2889b36850d00978a970867#A#compaction#575 average throughput is 1.02 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:41,275 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/2d56608fc61b4f2fa76a6521c495d11f is 175, key is test_row_0/A:col10/1732303480083/Put/seqid=0 2024-11-22T19:24:41,277 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 457088d1c2889b36850d00978a970867/B of 457088d1c2889b36850d00978a970867 into 42262c42b2d74291ba4f4a139d19dc68(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:24:41,277 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:41,277 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/B, priority=13, startTime=1732303481244; duration=0sec 2024-11-22T19:24:41,277 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:41,277 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:B 2024-11-22T19:24:41,277 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:41,278 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36543 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:41,279 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/C is initiating minor compaction (all files) 2024-11-22T19:24:41,279 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/C in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:41,279 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/948338b6a64a4fa0ae78dfc345006473, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/ca6873a9826a40448ad8e16ac3e80557, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/f338defc9282438d8e719fdd90ccc9f7] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=35.7 K 2024-11-22T19:24:41,279 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 948338b6a64a4fa0ae78dfc345006473, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732303478829 2024-11-22T19:24:41,279 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting ca6873a9826a40448ad8e16ac3e80557, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732303479450 2024-11-22T19:24:41,280 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting f338defc9282438d8e719fdd90ccc9f7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732303480081 2024-11-22T19:24:41,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 
is added to blk_1073742499_1675 (size=31447) 2024-11-22T19:24:41,287 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 457088d1c2889b36850d00978a970867#C#compaction#577 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:41,287 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/ed7324cf9fd5409c94b0dff38e58e0cb is 50, key is test_row_0/C:col10/1732303480083/Put/seqid=0 2024-11-22T19:24:41,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742500_1676 (size=12493) 2024-11-22T19:24:41,293 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/ed7324cf9fd5409c94b0dff38e58e0cb as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/ed7324cf9fd5409c94b0dff38e58e0cb 2024-11-22T19:24:41,297 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 457088d1c2889b36850d00978a970867/C of 457088d1c2889b36850d00978a970867 into ed7324cf9fd5409c94b0dff38e58e0cb(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:41,297 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:41,297 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/C, priority=13, startTime=1732303481244; duration=0sec 2024-11-22T19:24:41,297 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:41,297 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:C 2024-11-22T19:24:41,329 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:41,330 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-22T19:24:41,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
2024-11-22T19:24:41,330 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2837): Flushing 457088d1c2889b36850d00978a970867 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-22T19:24:41,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=A 2024-11-22T19:24:41,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:41,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=B 2024-11-22T19:24:41,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:41,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=C 2024-11-22T19:24:41,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:41,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411229bc5cdb7cf8e4b1aa2a1bcb270c569ed_457088d1c2889b36850d00978a970867 is 50, key is test_row_0/A:col10/1732303480396/Put/seqid=0 2024-11-22T19:24:41,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742501_1677 (size=12304) 2024-11-22T19:24:41,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 457088d1c2889b36850d00978a970867 2024-11-22T19:24:41,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:41,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:41,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:41,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303541508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:41,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303541508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:41,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:41,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303541510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:41,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:41,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303541511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:41,513 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:41,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303541512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:41,611 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:41,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303541611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:41,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-22T19:24:41,683 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/2d56608fc61b4f2fa76a6521c495d11f as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/2d56608fc61b4f2fa76a6521c495d11f 2024-11-22T19:24:41,687 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 457088d1c2889b36850d00978a970867/A of 457088d1c2889b36850d00978a970867 into 2d56608fc61b4f2fa76a6521c495d11f(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:24:41,687 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:41,687 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/A, priority=13, startTime=1732303481244; duration=0sec 2024-11-22T19:24:41,687 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:41,687 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:A 2024-11-22T19:24:41,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:41,746 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411229bc5cdb7cf8e4b1aa2a1bcb270c569ed_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411229bc5cdb7cf8e4b1aa2a1bcb270c569ed_457088d1c2889b36850d00978a970867 2024-11-22T19:24:41,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/c7376d692f9b48b897dce865b69b7145, store: [table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:41,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/c7376d692f9b48b897dce865b69b7145 is 175, key is test_row_0/A:col10/1732303480396/Put/seqid=0 2024-11-22T19:24:41,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742502_1678 (size=31105) 2024-11-22T19:24:41,813 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:41,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303541812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:42,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:42,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303542115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:42,151 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=174, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/c7376d692f9b48b897dce865b69b7145 2024-11-22T19:24:42,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/7e7c8811c6d44f84b84f5e3072819507 is 50, key is test_row_0/B:col10/1732303480396/Put/seqid=0 2024-11-22T19:24:42,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742503_1679 (size=12151) 2024-11-22T19:24:42,165 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/7e7c8811c6d44f84b84f5e3072819507 2024-11-22T19:24:42,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/a7cefca04fc542fbace412e6c8bc57cc is 50, key is test_row_0/C:col10/1732303480396/Put/seqid=0 2024-11-22T19:24:42,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742504_1680 (size=12151) 2024-11-22T19:24:42,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:42,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303542511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:42,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:42,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303542515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:42,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:42,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303542518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:42,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:42,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303542519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:42,579 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/a7cefca04fc542fbace412e6c8bc57cc 2024-11-22T19:24:42,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/c7376d692f9b48b897dce865b69b7145 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/c7376d692f9b48b897dce865b69b7145 2024-11-22T19:24:42,587 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/c7376d692f9b48b897dce865b69b7145, entries=150, sequenceid=174, filesize=30.4 K 2024-11-22T19:24:42,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/7e7c8811c6d44f84b84f5e3072819507 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/7e7c8811c6d44f84b84f5e3072819507 2024-11-22T19:24:42,591 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/7e7c8811c6d44f84b84f5e3072819507, entries=150, sequenceid=174, filesize=11.9 K 2024-11-22T19:24:42,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/a7cefca04fc542fbace412e6c8bc57cc as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/a7cefca04fc542fbace412e6c8bc57cc 2024-11-22T19:24:42,595 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/a7cefca04fc542fbace412e6c8bc57cc, entries=150, sequenceid=174, filesize=11.9 K 2024-11-22T19:24:42,596 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 457088d1c2889b36850d00978a970867 in 1266ms, sequenceid=174, compaction requested=false 2024-11-22T19:24:42,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:42,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:42,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170 2024-11-22T19:24:42,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=170 2024-11-22T19:24:42,599 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-11-22T19:24:42,599 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0290 sec 2024-11-22T19:24:42,600 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 2.0330 sec 2024-11-22T19:24:42,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 457088d1c2889b36850d00978a970867 2024-11-22T19:24:42,621 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 457088d1c2889b36850d00978a970867 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-22T19:24:42,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=A 2024-11-22T19:24:42,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:42,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=B 2024-11-22T19:24:42,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:42,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
457088d1c2889b36850d00978a970867, store=C 2024-11-22T19:24:42,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:42,628 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122b03684c97fef403fab2c13b7df2db3ab_457088d1c2889b36850d00978a970867 is 50, key is test_row_0/A:col10/1732303481501/Put/seqid=0 2024-11-22T19:24:42,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742505_1681 (size=12304) 2024-11-22T19:24:42,634 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:42,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:42,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303542636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:42,640 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122b03684c97fef403fab2c13b7df2db3ab_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122b03684c97fef403fab2c13b7df2db3ab_457088d1c2889b36850d00978a970867 2024-11-22T19:24:42,641 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/96342b60cb2d41d38f512963ae1e9b7b, store: [table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:42,642 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/96342b60cb2d41d38f512963ae1e9b7b is 175, key is test_row_0/A:col10/1732303481501/Put/seqid=0 2024-11-22T19:24:42,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742506_1682 (size=31105) 2024-11-22T19:24:42,666 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=203, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/96342b60cb2d41d38f512963ae1e9b7b 2024-11-22T19:24:42,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-22T19:24:42,671 INFO [Thread-2776 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-11-22T19:24:42,673 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:24:42,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-11-22T19:24:42,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-22T19:24:42,674 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:24:42,674 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/60d374b4e9a04f48b81a64287ab05f5b is 50, key is test_row_0/B:col10/1732303481501/Put/seqid=0 2024-11-22T19:24:42,675 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:24:42,675 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:24:42,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742507_1683 (size=12151) 2024-11-22T19:24:42,740 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:42,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303542739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:42,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-22T19:24:42,826 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:42,827 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-22T19:24:42,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:42,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:42,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:42,827 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T19:24:42,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:42,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:42,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:42,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303542941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:42,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-22T19:24:42,979 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:42,979 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-22T19:24:42,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:42,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:42,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
2024-11-22T19:24:42,980 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:42,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:42,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:43,080 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/60d374b4e9a04f48b81a64287ab05f5b 2024-11-22T19:24:43,087 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/85ca0cd18e924d5b885aa53b27c88494 is 50, key is test_row_0/C:col10/1732303481501/Put/seqid=0 2024-11-22T19:24:43,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742508_1684 (size=12151) 2024-11-22T19:24:43,108 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/85ca0cd18e924d5b885aa53b27c88494 2024-11-22T19:24:43,113 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/96342b60cb2d41d38f512963ae1e9b7b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/96342b60cb2d41d38f512963ae1e9b7b 2024-11-22T19:24:43,116 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/96342b60cb2d41d38f512963ae1e9b7b, entries=150, sequenceid=203, filesize=30.4 K 2024-11-22T19:24:43,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/60d374b4e9a04f48b81a64287ab05f5b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/60d374b4e9a04f48b81a64287ab05f5b 2024-11-22T19:24:43,120 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/60d374b4e9a04f48b81a64287ab05f5b, entries=150, sequenceid=203, filesize=11.9 K 2024-11-22T19:24:43,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/85ca0cd18e924d5b885aa53b27c88494 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/85ca0cd18e924d5b885aa53b27c88494 2024-11-22T19:24:43,124 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/85ca0cd18e924d5b885aa53b27c88494, entries=150, sequenceid=203, filesize=11.9 K 2024-11-22T19:24:43,124 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 457088d1c2889b36850d00978a970867 in 503ms, sequenceid=203, compaction requested=true 2024-11-22T19:24:43,124 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:43,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:24:43,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:43,124 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:43,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:24:43,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:43,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:24:43,124 DEBUG 
[RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:43,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:43,125 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:43,125 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93657 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:43,125 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/B is initiating minor compaction (all files) 2024-11-22T19:24:43,125 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/A is initiating minor compaction (all files) 2024-11-22T19:24:43,125 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/A in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:43,125 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/B in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:43,125 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/42262c42b2d74291ba4f4a139d19dc68, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/7e7c8811c6d44f84b84f5e3072819507, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/60d374b4e9a04f48b81a64287ab05f5b] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=35.9 K 2024-11-22T19:24:43,125 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/2d56608fc61b4f2fa76a6521c495d11f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/c7376d692f9b48b897dce865b69b7145, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/96342b60cb2d41d38f512963ae1e9b7b] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=91.5 K 2024-11-22T19:24:43,126 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] 
mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:43,126 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. files: [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/2d56608fc61b4f2fa76a6521c495d11f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/c7376d692f9b48b897dce865b69b7145, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/96342b60cb2d41d38f512963ae1e9b7b] 2024-11-22T19:24:43,126 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d56608fc61b4f2fa76a6521c495d11f, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732303480081 2024-11-22T19:24:43,126 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 42262c42b2d74291ba4f4a139d19dc68, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732303480081 2024-11-22T19:24:43,126 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting c7376d692f9b48b897dce865b69b7145, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732303480392 2024-11-22T19:24:43,126 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e7c8811c6d44f84b84f5e3072819507, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732303480392 2024-11-22T19:24:43,126 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 96342b60cb2d41d38f512963ae1e9b7b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732303481501 2024-11-22T19:24:43,127 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 60d374b4e9a04f48b81a64287ab05f5b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732303481501 2024-11-22T19:24:43,132 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:43,133 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 457088d1c2889b36850d00978a970867#B#compaction#584 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:43,133 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/e0f312b925ca4eb0b672ed71298e9b88 is 50, key is test_row_0/B:col10/1732303481501/Put/seqid=0 2024-11-22T19:24:43,134 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:43,134 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-22T19:24:43,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:43,134 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing 457088d1c2889b36850d00978a970867 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-22T19:24:43,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=A 2024-11-22T19:24:43,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:43,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=B 2024-11-22T19:24:43,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:43,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=C 2024-11-22T19:24:43,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:43,135 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112263c20bdfdd7c4312aa4ab52b53eae002_457088d1c2889b36850d00978a970867 store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:43,137 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112263c20bdfdd7c4312aa4ab52b53eae002_457088d1c2889b36850d00978a970867, store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:43,137 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112263c20bdfdd7c4312aa4ab52b53eae002_457088d1c2889b36850d00978a970867 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:43,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742509_1685 (size=12595) 2024-11-22T19:24:43,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742510_1686 (size=4469) 2024-11-22T19:24:43,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112209232fdaaf16403f91270bd5539ac937_457088d1c2889b36850d00978a970867 is 50, key is test_row_0/A:col10/1732303482632/Put/seqid=0 2024-11-22T19:24:43,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742511_1687 (size=12304) 2024-11-22T19:24:43,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 457088d1c2889b36850d00978a970867 2024-11-22T19:24:43,249 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:43,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-22T19:24:43,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:43,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303543317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:43,420 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:43,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303543420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:43,542 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 457088d1c2889b36850d00978a970867#A#compaction#585 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:43,543 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/595c943bbefc4437903d667793aa89f2 is 175, key is test_row_0/A:col10/1732303481501/Put/seqid=0 2024-11-22T19:24:43,544 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/e0f312b925ca4eb0b672ed71298e9b88 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/e0f312b925ca4eb0b672ed71298e9b88 2024-11-22T19:24:43,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742512_1688 (size=31549) 2024-11-22T19:24:43,548 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 457088d1c2889b36850d00978a970867/B of 457088d1c2889b36850d00978a970867 into e0f312b925ca4eb0b672ed71298e9b88(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:43,548 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:43,548 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/B, priority=13, startTime=1732303483124; duration=0sec 2024-11-22T19:24:43,548 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:43,548 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:B 2024-11-22T19:24:43,548 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:43,549 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:43,549 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/C is initiating minor compaction (all files) 2024-11-22T19:24:43,549 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/C in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
2024-11-22T19:24:43,549 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/ed7324cf9fd5409c94b0dff38e58e0cb, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/a7cefca04fc542fbace412e6c8bc57cc, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/85ca0cd18e924d5b885aa53b27c88494] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=35.9 K 2024-11-22T19:24:43,549 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting ed7324cf9fd5409c94b0dff38e58e0cb, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732303480081 2024-11-22T19:24:43,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:43,550 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting a7cefca04fc542fbace412e6c8bc57cc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732303480392 2024-11-22T19:24:43,550 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 85ca0cd18e924d5b885aa53b27c88494, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732303481501 2024-11-22T19:24:43,552 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112209232fdaaf16403f91270bd5539ac937_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112209232fdaaf16403f91270bd5539ac937_457088d1c2889b36850d00978a970867 2024-11-22T19:24:43,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/63c8f07739054d2aac4527528f7aab16, store: [table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:43,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/63c8f07739054d2aac4527528f7aab16 is 175, key is test_row_0/A:col10/1732303482632/Put/seqid=0 2024-11-22T19:24:43,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742513_1689 (size=31105) 2024-11-22T19:24:43,562 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 457088d1c2889b36850d00978a970867#C#compaction#587 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:43,563 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/ac50441093234a3abeeebedfd77eb394 is 50, key is test_row_0/C:col10/1732303481501/Put/seqid=0 2024-11-22T19:24:43,564 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=211, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/63c8f07739054d2aac4527528f7aab16 2024-11-22T19:24:43,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742514_1690 (size=12595) 2024-11-22T19:24:43,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/ad677676d9dc4784a81ca1bb7b14a82a is 50, key is test_row_0/B:col10/1732303482632/Put/seqid=0 2024-11-22T19:24:43,572 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/ac50441093234a3abeeebedfd77eb394 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/ac50441093234a3abeeebedfd77eb394 2024-11-22T19:24:43,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742515_1691 (size=12151) 2024-11-22T19:24:43,576 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 457088d1c2889b36850d00978a970867/C of 457088d1c2889b36850d00978a970867 into ac50441093234a3abeeebedfd77eb394(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:24:43,577 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:43,577 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/C, priority=13, startTime=1732303483124; duration=0sec 2024-11-22T19:24:43,577 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:43,577 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:C 2024-11-22T19:24:43,622 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:43,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303543621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:43,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-22T19:24:43,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:43,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303543923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:43,951 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/595c943bbefc4437903d667793aa89f2 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/595c943bbefc4437903d667793aa89f2 2024-11-22T19:24:43,955 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 457088d1c2889b36850d00978a970867/A of 457088d1c2889b36850d00978a970867 into 595c943bbefc4437903d667793aa89f2(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:24:43,955 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:43,955 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/A, priority=13, startTime=1732303483124; duration=0sec 2024-11-22T19:24:43,955 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:43,955 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:A 2024-11-22T19:24:43,975 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/ad677676d9dc4784a81ca1bb7b14a82a 2024-11-22T19:24:43,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/d8ea985abb3e4af79137616266713ed7 is 50, key is test_row_0/C:col10/1732303482632/Put/seqid=0 2024-11-22T19:24:43,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742516_1692 (size=12151) 2024-11-22T19:24:43,984 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/d8ea985abb3e4af79137616266713ed7 2024-11-22T19:24:43,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/63c8f07739054d2aac4527528f7aab16 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/63c8f07739054d2aac4527528f7aab16 2024-11-22T19:24:43,989 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/63c8f07739054d2aac4527528f7aab16, entries=150, sequenceid=211, filesize=30.4 K 2024-11-22T19:24:43,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/ad677676d9dc4784a81ca1bb7b14a82a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/ad677676d9dc4784a81ca1bb7b14a82a 2024-11-22T19:24:43,993 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/ad677676d9dc4784a81ca1bb7b14a82a, entries=150, sequenceid=211, filesize=11.9 K 2024-11-22T19:24:43,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/d8ea985abb3e4af79137616266713ed7 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/d8ea985abb3e4af79137616266713ed7 2024-11-22T19:24:43,996 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/d8ea985abb3e4af79137616266713ed7, entries=150, sequenceid=211, filesize=11.9 K 2024-11-22T19:24:43,997 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 457088d1c2889b36850d00978a970867 in 863ms, sequenceid=211, compaction requested=false 2024-11-22T19:24:43,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:43,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
2024-11-22T19:24:43,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-11-22T19:24:43,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-11-22T19:24:44,000 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-11-22T19:24:44,000 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3230 sec 2024-11-22T19:24:44,001 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 1.3270 sec 2024-11-22T19:24:44,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 457088d1c2889b36850d00978a970867 2024-11-22T19:24:44,430 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 457088d1c2889b36850d00978a970867 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-22T19:24:44,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=A 2024-11-22T19:24:44,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:44,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=B 2024-11-22T19:24:44,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:44,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=C 2024-11-22T19:24:44,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:44,435 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411228a99cc280066425b8dd3a3ec31fe83de_457088d1c2889b36850d00978a970867 is 50, key is test_row_0/A:col10/1732303484429/Put/seqid=0 2024-11-22T19:24:44,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742517_1693 (size=14794) 2024-11-22T19:24:44,439 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:44,441 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:44,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303544439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:44,442 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411228a99cc280066425b8dd3a3ec31fe83de_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411228a99cc280066425b8dd3a3ec31fe83de_457088d1c2889b36850d00978a970867 2024-11-22T19:24:44,442 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/c871ca70e34a4a7f85f8d04407e9149b, store: [table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:44,443 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/c871ca70e34a4a7f85f8d04407e9149b is 175, key is test_row_0/A:col10/1732303484429/Put/seqid=0 2024-11-22T19:24:44,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742518_1694 (size=39749) 2024-11-22T19:24:44,526 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:44,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303544525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:44,527 DEBUG [Thread-2774 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4132 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., hostname=a307a1377457,35917,1732303314657, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:24:44,528 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:44,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303544526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:44,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:44,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303544527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:44,529 DEBUG [Thread-2768 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4132 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., hostname=a307a1377457,35917,1732303314657, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:24:44,529 DEBUG [Thread-2766 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4132 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., hostname=a307a1377457,35917,1732303314657, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:24:44,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:44,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303544529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:44,532 DEBUG [Thread-2770 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., hostname=a307a1377457,35917,1732303314657, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:24:44,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:44,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303544542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:44,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:44,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303544746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:44,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-22T19:24:44,778 INFO [Thread-2776 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-11-22T19:24:44,779 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:24:44,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-11-22T19:24:44,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-22T19:24:44,780 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:24:44,781 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:24:44,781 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:24:44,846 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=243, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/c871ca70e34a4a7f85f8d04407e9149b 2024-11-22T19:24:44,852 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/77a798b83ad24a2fb87299c2af76a793 is 50, key is test_row_0/B:col10/1732303484429/Put/seqid=0 2024-11-22T19:24:44,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742519_1695 
(size=12151) 2024-11-22T19:24:44,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-22T19:24:44,932 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:44,933 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-22T19:24:44,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:44,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:44,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:44,933 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:44,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:44,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:45,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:45,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303545048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:45,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-22T19:24:45,085 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:45,085 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-22T19:24:45,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:45,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:45,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:45,086 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
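The RpcRetryingCallerImpl entries above show the client retrying each Mutate after the server answers with RegionTooBusyException ("tries=6, retries=16", with the backoff growing as the call ages). Below is a minimal, hedged sketch of how that retry budget is tuned on the client side; the table, row, and family names mirror the test, while the class name, the chosen values, and the column value are illustrative assumptions rather than anything taken from this run.

// Hedged sketch: tuning the client retry behaviour that RpcRetryingCallerImpl logs above
// when a put is rejected with RegionTooBusyException. Values are illustrative only.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryTunedWriter {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Upper bound on retries for a single operation; RegionTooBusyException is retriable.
    conf.setInt("hbase.client.retries.number", 10);
    // Base sleep between retries; the actual sleep grows with the retry count.
    conf.setLong("hbase.client.pause", 100);
    // Overall cap so a region that stays blocked fails the call instead of hanging the writer.
    conf.setLong("hbase.client.operation.timeout", 60_000);

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      table.put(put); // retried internally until the limits above are exhausted
    }
  }
}

With a smaller pause and retry count, a region that stays over its memstore blocking limit typically surfaces to the caller as a RetriesExhaustedException sooner, instead of the multi-second retry loops visible in the log.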
2024-11-22T19:24:45,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:45,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:45,237 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:45,238 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-22T19:24:45,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:45,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:45,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:45,238 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:45,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:45,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
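The pid=173/174 entries above follow the usual master-procedure polling pattern ("Checking to see if procedure is done pid=173"), which is consistent with an admin-driven flush issued by the test's flushing thread; while the MemStoreFlusher already has the region mid-flush, each re-dispatched FlushRegionCallable fails with the "already flushing" IOException and the master simply retries it. A hedged sketch of the client side of such a flush (the table name is taken from the log; that the test drives it through Admin.flush is an assumption):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Submits a flush procedure on the master (the pid=173 style entry) which in
      // turn dispatches FlushRegionCallable to the region server (the pid=174 style
      // entry); the call blocks until the procedure reports done.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}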
2024-11-22T19:24:45,256 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/77a798b83ad24a2fb87299c2af76a793 2024-11-22T19:24:45,263 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/909510e8480140769f236960968b8582 is 50, key is test_row_0/C:col10/1732303484429/Put/seqid=0 2024-11-22T19:24:45,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742520_1696 (size=12151) 2024-11-22T19:24:45,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-22T19:24:45,390 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:45,390 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-22T19:24:45,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:45,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:45,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:45,390 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
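The rejected Mutate calls (callId 110 above and callId 112 further below) are what the writer threads see while the region sits over that blocking limit. The HBase client retries RegionTooBusyException on its own, governed by hbase.client.retries.number and hbase.client.pause; the explicit loop below is only a sketch that makes the behavior visible, with the table, row and column names taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put); // may be answered with RegionTooBusyException while blocked
          break;          // write accepted once the memstore has been flushed down
        } catch (RegionTooBusyException e) {
          if (attempt >= 10) throw e;
          Thread.sleep(100L * attempt); // back off and let MemStoreFlusher catch up
        }
      }
    }
  }
}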
2024-11-22T19:24:45,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:45,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:45,542 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:45,542 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-22T19:24:45,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:45,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:45,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:45,543 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:45,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:45,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:45,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:45,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303545555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:45,667 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/909510e8480140769f236960968b8582 2024-11-22T19:24:45,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/c871ca70e34a4a7f85f8d04407e9149b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/c871ca70e34a4a7f85f8d04407e9149b 2024-11-22T19:24:45,673 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/c871ca70e34a4a7f85f8d04407e9149b, entries=200, sequenceid=243, filesize=38.8 K 2024-11-22T19:24:45,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/77a798b83ad24a2fb87299c2af76a793 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/77a798b83ad24a2fb87299c2af76a793 2024-11-22T19:24:45,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,677 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/77a798b83ad24a2fb87299c2af76a793, entries=150, sequenceid=243, filesize=11.9 K 2024-11-22T19:24:45,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,678 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/909510e8480140769f236960968b8582 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/909510e8480140769f236960968b8582 2024-11-22T19:24:45,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,681 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/909510e8480140769f236960968b8582, entries=150, sequenceid=243, filesize=11.9 K 2024-11-22T19:24:45,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,681 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 457088d1c2889b36850d00978a970867 in 1251ms, sequenceid=243, compaction requested=true 2024-11-22T19:24:45,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:45,681 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:24:45,681 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:45,681 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:45,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:24:45,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:45,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:24:45,682 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:45,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:45,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,682 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:45,682 DEBUG 
[RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102403 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:45,682 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/A is initiating minor compaction (all files) 2024-11-22T19:24:45,682 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/B is initiating minor compaction (all files) 2024-11-22T19:24:45,682 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/A in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:45,682 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/B in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:45,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,683 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/e0f312b925ca4eb0b672ed71298e9b88, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/ad677676d9dc4784a81ca1bb7b14a82a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/77a798b83ad24a2fb87299c2af76a793] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=36.0 K 2024-11-22T19:24:45,683 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/595c943bbefc4437903d667793aa89f2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/63c8f07739054d2aac4527528f7aab16, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/c871ca70e34a4a7f85f8d04407e9149b] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=100.0 K 2024-11-22T19:24:45,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,683 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, 
activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:45,683 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. files: [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/595c943bbefc4437903d667793aa89f2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/63c8f07739054d2aac4527528f7aab16, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/c871ca70e34a4a7f85f8d04407e9149b] 2024-11-22T19:24:45,683 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting e0f312b925ca4eb0b672ed71298e9b88, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732303481501 2024-11-22T19:24:45,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,683 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 595c943bbefc4437903d667793aa89f2, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732303481501 2024-11-22T19:24:45,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,683 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting ad677676d9dc4784a81ca1bb7b14a82a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732303482626 2024-11-22T19:24:45,683 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 63c8f07739054d2aac4527528f7aab16, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732303482626 2024-11-22T19:24:45,683 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 77a798b83ad24a2fb87299c2af76a793, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732303483308 2024-11-22T19:24:45,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,684 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting c871ca70e34a4a7f85f8d04407e9149b, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732303483308 2024-11-22T19:24:45,684 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,688 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,690 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 457088d1c2889b36850d00978a970867#B#compaction#593 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:45,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,691 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/3ab3ddb258a142b0936d0c8f36fcdde3 is 50, key is test_row_0/B:col10/1732303484429/Put/seqid=0 2024-11-22T19:24:45,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,693 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,693 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:45,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,695 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:45,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742521_1697 (size=12697) 2024-11-22T19:24:45,695 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-22T19:24:45,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
2024-11-22T19:24:45,695 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411227b92f5bb60d941eeb3220c90ce4625d2_457088d1c2889b36850d00978a970867 store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:45,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,695 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing 457088d1c2889b36850d00978a970867 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-22T19:24:45,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=A 2024-11-22T19:24:45,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:45,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=B 2024-11-22T19:24:45,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:45,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=C 2024-11-22T19:24:45,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:45,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,697 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411227b92f5bb60d941eeb3220c90ce4625d2_457088d1c2889b36850d00978a970867, store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:45,697 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411227b92f5bb60d941eeb3220c90ce4625d2_457088d1c2889b36850d00978a970867 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:45,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742522_1698 (size=4469) 2024-11-22T19:24:45,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122688fabe9137d46c7b82cdb50d24bf5a2_457088d1c2889b36850d00978a970867 is 50, key is test_row_0/A:col10/1732303484433/Put/seqid=0 2024-11-22T19:24:45,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,704 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742523_1699 (size=9814) 2024-11-22T19:24:45,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,712 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122688fabe9137d46c7b82cdb50d24bf5a2_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122688fabe9137d46c7b82cdb50d24bf5a2_457088d1c2889b36850d00978a970867 2024-11-22T19:24:45,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/a07e2a77a6a84c979d9afc9a9598d82c, store: [table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:45,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/a07e2a77a6a84c979d9afc9a9598d82c is 175, key is test_row_0/A:col10/1732303484433/Put/seqid=0 2024-11-22T19:24:45,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,714 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742524_1700 (size=22461) 2024-11-22T19:24:45,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... this same DEBUG entry repeats continuously from 2024-11-22T19:24:45,749 through 2024-11-22T19:24:45,825, emitted by RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 on port 35917; the duplicated entries are omitted here ...]
2024-11-22T19:24:45,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG entry from storefiletracker.StoreFileTrackerFactory(122) ("instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker") repeats continuously on RpcServer.default.FPBQ.Fifo handlers 0 and 2, queue=0, port=35917, from 2024-11-22T19:24:45,875 to 19:24:45,883 ...]
2024-11-22T19:24:45,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173
[... the same storefiletracker.StoreFileTrackerFactory(122) DEBUG entry continues to repeat on RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2, queue=0, port=35917, from 19:24:45,884 through 19:24:45,958 ...]
2024-11-22T19:24:45,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:45,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
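The StoreFileTrackerFactory(122) lines collapsed above are emitted once per store the RPC handlers touch, each time the factory resolves the configured tracker implementation. As a rough, hypothetical sketch of that pattern only (illustrative names, not the real org.apache.hadoop.hbase.regionserver.storefiletracker API), a reflection-driven factory of this shape produces exactly one such DEBUG line per call:

import java.lang.reflect.Constructor;

// Illustrative sketch of a configuration-driven, reflective factory.
interface TrackerSketch { }

final class DefaultTrackerSketch implements TrackerSketch { }

final class TrackerFactorySketch {
  // Resolve the class named in configuration and instantiate it reflectively.
  static TrackerSketch create(String implClassName) throws Exception {
    Class<?> clazz = Class.forName(implClassName);
    // One line of this shape per call is why the message repeats for every store touched.
    System.out.println("instantiating StoreFileTracker impl " + clazz.getName());
    Constructor<?> ctor = clazz.getDeclaredConstructor();
    return (TrackerSketch) ctor.newInstance();
  }

  public static void main(String[] args) throws Exception {
    TrackerSketch tracker = create(DefaultTrackerSketch.class.getName());
    System.out.println("created " + tracker.getClass().getSimpleName());
  }
}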
2024-11-22T19:24:46,100 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/3ab3ddb258a142b0936d0c8f36fcdde3 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/3ab3ddb258a142b0936d0c8f36fcdde3
2024-11-22T19:24:46,102 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 457088d1c2889b36850d00978a970867#A#compaction#594 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-22T19:24:46,103 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/a3b2f2d2559646d883b2b5cb82f31f88 is 175, key is test_row_0/A:col10/1732303484429/Put/seqid=0
2024-11-22T19:24:46,105 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 457088d1c2889b36850d00978a970867/B of 457088d1c2889b36850d00978a970867 into 3ab3ddb258a142b0936d0c8f36fcdde3(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-22T19:24:46,105 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867:
2024-11-22T19:24:46,105 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/B, priority=13, startTime=1732303485681; duration=0sec
2024-11-22T19:24:46,105 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-22T19:24:46,105 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:B
2024-11-22T19:24:46,105 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-22T19:24:46,106 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-22T19:24:46,106 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/C is initiating minor compaction (all files)
2024-11-22T19:24:46,106 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/C in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.
2024-11-22T19:24:46,107 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/ac50441093234a3abeeebedfd77eb394, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/d8ea985abb3e4af79137616266713ed7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/909510e8480140769f236960968b8582] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=36.0 K
2024-11-22T19:24:46,107 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting ac50441093234a3abeeebedfd77eb394, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732303481501
2024-11-22T19:24:46,107 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting d8ea985abb3e4af79137616266713ed7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732303482626
2024-11-22T19:24:46,108 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 909510e8480140769f236960968b8582, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732303483308
[2024-11-22T19:24:46,100 - 19:24:46,113: interleaved DEBUG "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" messages from RpcServer handlers 0 and 2 collapsed.]
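The SortedCompactionPolicy / ExploringCompactionPolicy entries above record a size-ratio selection over the three eligible C-store files (36897 bytes in total). A minimal sketch of that kind of ratio-based window selection, assuming a toy ratio rule and approximate sizes rather than the actual ExploringCompactionPolicy logic, might look like:

import java.util.ArrayList;
import java.util.List;

// Toy sketch of ratio-based compaction file selection (illustrative only).
final class CompactionSelectSketch {
  // Return a contiguous window of file sizes satisfying the ratio rule,
  // preferring more files, then a smaller total size.
  static List<Long> select(List<Long> sizes, double ratio, int minFiles, int maxFiles) {
    List<Long> best = new ArrayList<>();
    long bestTotal = Long.MAX_VALUE;
    for (int start = 0; start < sizes.size(); start++) {
      for (int end = start + minFiles; end <= Math.min(sizes.size(), start + maxFiles); end++) {
        List<Long> window = sizes.subList(start, end);
        long total = window.stream().mapToLong(Long::longValue).sum();
        // Each file must be no larger than ratio * (sum of the other files in the window).
        boolean inRatio = window.stream().allMatch(s -> s <= (total - s) * ratio);
        if (!inRatio) continue;
        if (window.size() > best.size() || (window.size() == best.size() && total < bestTotal)) {
          best = new ArrayList<>(window);
          bestTotal = total;
        }
      }
    }
    return best;
  }

  public static void main(String[] args) {
    // Approximate byte sizes of the three C-store candidates above (sum = 36897).
    List<Long> sizes = List.of(12595L, 12186L, 12116L);
    System.out.println(select(sizes, 1.2, 2, 10)); // selects all 3 files, as in the log
  }
}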
2024-11-22T19:24:46,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742525_1701 (size=31651)
2024-11-22T19:24:46,119 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 457088d1c2889b36850d00978a970867#C#compaction#596 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-22T19:24:46,120 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=250, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/a07e2a77a6a84c979d9afc9a9598d82c
2024-11-22T19:24:46,120 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/303c5b997acc4165ac28160853247fc1 is 50, key is test_row_0/C:col10/1732303484429/Put/seqid=0
[2024-11-22T19:24:46,113 - 19:24:46,124: interleaved DEBUG "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" messages from RpcServer handlers 0 and 2 collapsed.]
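The PressureAwareThroughputController entries above report each compaction's average throughput against the 50.00 MB/second limit, together with how many times and how long the writer slept. A minimal sketch of that style of throttling, assuming a simple "sleep whenever the bytes written run ahead of the byte budget" rule rather than the controller's real pressure model, could be:

// Illustrative write-throttling sketch: back off when the running rate exceeds the limit,
// and report the average throughput plus sleep statistics at the end.
final class ThroughputThrottleSketch {
  private final double limitBytesPerSec;
  private long bytesWritten;
  private long sleeps;
  private long totalSleptMs;
  private final long startNanos = System.nanoTime();

  ThroughputThrottleSketch(double limitBytesPerSec) { this.limitBytesPerSec = limitBytesPerSec; }

  // Called after each chunk is written with the number of bytes just written.
  void control(long deltaBytes) throws InterruptedException {
    bytesWritten += deltaBytes;
    double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
    double neededSec = bytesWritten / limitBytesPerSec; // time the bytes should have taken at the limit
    long sleepMs = (long) ((neededSec - elapsedSec) * 1000);
    if (sleepMs > 0) {                                   // running too fast: back off
      Thread.sleep(sleepMs);
      sleeps++;
      totalSleptMs += sleepMs;
    }
  }

  String finish() {
    double elapsedSec = Math.max((System.nanoTime() - startNanos) / 1e9, 1e-9);
    double avgMbPerSec = bytesWritten / elapsedSec / (1024 * 1024);
    return String.format("average throughput is %.2f MB/second, slept %d time(s) and total slept time is %d ms",
        avgMbPerSec, sleeps, totalSleptMs);
  }

  public static void main(String[] args) throws InterruptedException {
    ThroughputThrottleSketch throttle = new ThroughputThrottleSketch(50 * 1024 * 1024); // 50 MB/s limit
    for (int i = 0; i < 10; i++) throttle.control(64 * 1024);                           // pretend 64 KB writes
    System.out.println(throttle.finish());
  }
}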
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-22T19:24:46,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/92058d9bb89f4a609219171d32f59588 is 50, key is test_row_0/B:col10/1732303484433/Put/seqid=0
2024-11-22T19:24:46,159 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/a3b2f2d2559646d883b2b5cb82f31f88 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/a3b2f2d2559646d883b2b5cb82f31f88
2024-11-22T19:24:46,163 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 457088d1c2889b36850d00978a970867/A of 457088d1c2889b36850d00978a970867 into a3b2f2d2559646d883b2b5cb82f31f88(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-22T19:24:46,164 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867:
2024-11-22T19:24:46,164 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/A, priority=13, startTime=1732303485681; duration=0sec
2024-11-22T19:24:46,164 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-22T19:24:46,164 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:A
2024-11-22T19:24:46,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742526_1702 (size=12697)
2024-11-22T19:24:46,172 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/303c5b997acc4165ac28160853247fc1 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/303c5b997acc4165ac28160853247fc1
2024-11-22T19:24:46,177 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 457088d1c2889b36850d00978a970867/C of 457088d1c2889b36850d00978a970867 into 303c5b997acc4165ac28160853247fc1(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-22T19:24:46,177 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867:
2024-11-22T19:24:46,177 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/C, priority=13, startTime=1732303485682; duration=0sec
2024-11-22T19:24:46,177 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-22T19:24:46,177 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:C
2024-11-22T19:24:46,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742527_1703 (size=9757)
2024-11-22T19:24:46,188 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/92058d9bb89f4a609219171d32f59588
2024-11-22T19:24:46,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/5bd929e324e040ab9932900286eff6b7 is 50, key is test_row_0/C:col10/1732303484433/Put/seqid=0
2024-11-22T19:24:46,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742528_1704 (size=9757) 2024-11-22T19:24:46,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,211 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/5bd929e324e040ab9932900286eff6b7 2024-11-22T19:24:46,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/a07e2a77a6a84c979d9afc9a9598d82c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/a07e2a77a6a84c979d9afc9a9598d82c 2024-11-22T19:24:46,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,220 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/a07e2a77a6a84c979d9afc9a9598d82c, entries=100, sequenceid=250, filesize=21.9 K 2024-11-22T19:24:46,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/92058d9bb89f4a609219171d32f59588 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/92058d9bb89f4a609219171d32f59588 2024-11-22T19:24:46,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-22T19:24:46,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-22T19:24:46,225 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/92058d9bb89f4a609219171d32f59588, entries=100, sequenceid=250, filesize=9.5 K
2024-11-22T19:24:46,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/5bd929e324e040ab9932900286eff6b7 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/5bd929e324e040ab9932900286eff6b7
2024-11-22T19:24:46,230 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/5bd929e324e040ab9932900286eff6b7, entries=100, sequenceid=250, filesize=9.5 K
2024-11-22T19:24:46,231 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 457088d1c2889b36850d00978a970867 in 535ms, sequenceid=250, compaction requested=false
2024-11-22T19:24:46,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for 457088d1c2889b36850d00978a970867:
2024-11-22T19:24:46,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.
2024-11-22T19:24:46,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174
2024-11-22T19:24:46,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=174
2024-11-22T19:24:46,234 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173
2024-11-22T19:24:46,234 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4510 sec
2024-11-22T19:24:46,235 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 1.4550 sec
2024-11-22T19:24:46,273 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,275 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,278 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,282 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,284 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,287 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,289 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,291 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,293 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,295 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,298 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,301 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,303 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,306 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
... [duplicate entries condensed: the preceding DEBUG message from storefiletracker.StoreFileTrackerFactory(122) is repeated continuously by RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=35917) from 2024-11-22T19:24:46,306 through 2024-11-22T19:24:46,354] ...
2024-11-22T19:24:46,354 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,357 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,360 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,362 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,364 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,366 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,368 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,371 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,373 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,375 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,377 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,380 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,383 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,386 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[identical DEBUG entries from storefiletracker.StoreFileTrackerFactory(122) repeat continuously from 2024-11-22T19:24:46,386 through 2024-11-22T19:24:46,436, emitted by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=35917); every occurrence instantiates the StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker]
2024-11-22T19:24:46,436 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,439 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,441 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,443 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,445 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,447 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,449 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,452 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,454 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,456 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,458 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,461 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,463 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,465 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG entry from storefiletracker.StoreFileTrackerFactory(122), "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker", repeats continuously on RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=35917) from 2024-11-22T19:24:46,465 through 2024-11-22T19:24:46,513 ...]
2024-11-22T19:24:46,514 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,516 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,518 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,520 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,522 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,525 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,527 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,530 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,531 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,534 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,536 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,538 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,541 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,543 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,545 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,548 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,550 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,552 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,554 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,557 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,559 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,562 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,564 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,566 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,569 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,571 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,573 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,576 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,578 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,580 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,582 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,585 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,588 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,590 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,592 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,595 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,597 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,600 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,602 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,604 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,606 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,609 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,614 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,622 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 457088d1c2889b36850d00978a970867 2024-11-22T19:24:46,627 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 457088d1c2889b36850d00978a970867 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T19:24:46,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=A 2024-11-22T19:24:46,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:46,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=B 2024-11-22T19:24:46,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:46,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=C 2024-11-22T19:24:46,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:46,640 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112281ff00e9874848059d1cca262a009218_457088d1c2889b36850d00978a970867 is 50, key is test_row_0/A:col10/1732303486626/Put/seqid=0 2024-11-22T19:24:46,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742529_1705 (size=12454) 2024-11-22T19:24:46,679 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,683 INFO [MemStoreFlusher.0 {}] 
regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112281ff00e9874848059d1cca262a009218_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112281ff00e9874848059d1cca262a009218_457088d1c2889b36850d00978a970867 2024-11-22T19:24:46,685 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/2d1a934a2e0a4c1da9ece6e9914d7d2c, store: [table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:46,685 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/2d1a934a2e0a4c1da9ece6e9914d7d2c is 175, key is test_row_0/A:col10/1732303486626/Put/seqid=0 2024-11-22T19:24:46,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:46,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303546693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:46,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742530_1706 (size=31255) 2024-11-22T19:24:46,698 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=264, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/2d1a934a2e0a4c1da9ece6e9914d7d2c 2024-11-22T19:24:46,706 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/3a26c614d04f49b59981095a1687fc0b is 50, key is test_row_0/B:col10/1732303486626/Put/seqid=0 2024-11-22T19:24:46,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742531_1707 (size=12301) 2024-11-22T19:24:46,732 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=264 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/3a26c614d04f49b59981095a1687fc0b 2024-11-22T19:24:46,744 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/806ef72bbff042398bac54a284449057 is 50, key is test_row_0/C:col10/1732303486626/Put/seqid=0 2024-11-22T19:24:46,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742532_1708 (size=12301) 2024-11-22T19:24:46,759 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=264 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/806ef72bbff042398bac54a284449057 2024-11-22T19:24:46,763 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/2d1a934a2e0a4c1da9ece6e9914d7d2c as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/2d1a934a2e0a4c1da9ece6e9914d7d2c 2024-11-22T19:24:46,767 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/2d1a934a2e0a4c1da9ece6e9914d7d2c, entries=150, sequenceid=264, filesize=30.5 K 2024-11-22T19:24:46,768 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/3a26c614d04f49b59981095a1687fc0b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/3a26c614d04f49b59981095a1687fc0b 2024-11-22T19:24:46,772 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/3a26c614d04f49b59981095a1687fc0b, entries=150, sequenceid=264, filesize=12.0 K 2024-11-22T19:24:46,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/806ef72bbff042398bac54a284449057 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/806ef72bbff042398bac54a284449057 2024-11-22T19:24:46,776 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/806ef72bbff042398bac54a284449057, entries=150, sequenceid=264, filesize=12.0 K 2024-11-22T19:24:46,777 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 457088d1c2889b36850d00978a970867 in 150ms, sequenceid=264, compaction requested=true 2024-11-22T19:24:46,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:46,777 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:24:46,777 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:46,777 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:46,777 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:B, priority=-2147483648, current under compaction store 
size is 2 2024-11-22T19:24:46,777 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:46,777 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:24:46,777 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:46,777 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:46,778 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:46,778 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:46,778 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/B is initiating minor compaction (all files) 2024-11-22T19:24:46,778 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/A is initiating minor compaction (all files) 2024-11-22T19:24:46,778 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/B in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:46,778 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/A in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
2024-11-22T19:24:46,778 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/3ab3ddb258a142b0936d0c8f36fcdde3, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/92058d9bb89f4a609219171d32f59588, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/3a26c614d04f49b59981095a1687fc0b] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=33.9 K 2024-11-22T19:24:46,778 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/a3b2f2d2559646d883b2b5cb82f31f88, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/a07e2a77a6a84c979d9afc9a9598d82c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/2d1a934a2e0a4c1da9ece6e9914d7d2c] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=83.4 K 2024-11-22T19:24:46,778 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:46,778 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
files: [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/a3b2f2d2559646d883b2b5cb82f31f88, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/a07e2a77a6a84c979d9afc9a9598d82c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/2d1a934a2e0a4c1da9ece6e9914d7d2c] 2024-11-22T19:24:46,779 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ab3ddb258a142b0936d0c8f36fcdde3, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732303483308 2024-11-22T19:24:46,779 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3b2f2d2559646d883b2b5cb82f31f88, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732303483308 2024-11-22T19:24:46,779 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 92058d9bb89f4a609219171d32f59588, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732303484433 2024-11-22T19:24:46,779 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting a07e2a77a6a84c979d9afc9a9598d82c, keycount=100, bloomtype=ROW, size=21.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732303484433 2024-11-22T19:24:46,779 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a26c614d04f49b59981095a1687fc0b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1732303486615 2024-11-22T19:24:46,779 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d1a934a2e0a4c1da9ece6e9914d7d2c, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1732303486615 2024-11-22T19:24:46,785 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 457088d1c2889b36850d00978a970867#B#compaction#602 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:46,786 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/b06968f7333148aa9c56b8f9b6e9321d is 50, key is test_row_0/B:col10/1732303486626/Put/seqid=0 2024-11-22T19:24:46,787 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:46,788 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112214782a8216224c01a675c00efa24658f_457088d1c2889b36850d00978a970867 store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:46,790 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112214782a8216224c01a675c00efa24658f_457088d1c2889b36850d00978a970867, store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:46,790 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112214782a8216224c01a675c00efa24658f_457088d1c2889b36850d00978a970867 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:46,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742533_1709 (size=12949) 2024-11-22T19:24:46,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 457088d1c2889b36850d00978a970867 2024-11-22T19:24:46,799 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 457088d1c2889b36850d00978a970867 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-22T19:24:46,799 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=A 2024-11-22T19:24:46,799 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:46,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=B 2024-11-22T19:24:46,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:46,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=C 2024-11-22T19:24:46,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:46,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742534_1710 (size=4469) 2024-11-22T19:24:46,825 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411220d4cb6f7f8d2428bbcfd8aecdb061841_457088d1c2889b36850d00978a970867 is 50, key is test_row_0/A:col10/1732303486798/Put/seqid=0 2024-11-22T19:24:46,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:46,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303546827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:46,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742535_1711 (size=14994) 2024-11-22T19:24:46,838 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:46,841 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411220d4cb6f7f8d2428bbcfd8aecdb061841_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411220d4cb6f7f8d2428bbcfd8aecdb061841_457088d1c2889b36850d00978a970867 2024-11-22T19:24:46,842 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/55be4469b0fa4ac88889695280ad4970, store: [table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:46,843 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/55be4469b0fa4ac88889695280ad4970 is 175, key is test_row_0/A:col10/1732303486798/Put/seqid=0 2024-11-22T19:24:46,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742536_1712 (size=39949) 2024-11-22T19:24:46,870 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=290, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/55be4469b0fa4ac88889695280ad4970 2024-11-22T19:24:46,876 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/74e19a8217954c37a20e4e5b6e2a76e6 is 50, key is test_row_0/B:col10/1732303486798/Put/seqid=0 2024-11-22T19:24:46,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742537_1713 (size=12301) 2024-11-22T19:24:46,880 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/74e19a8217954c37a20e4e5b6e2a76e6 2024-11-22T19:24:46,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-22T19:24:46,884 INFO [Thread-2776 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-11-22T19:24:46,886 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:24:46,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees 2024-11-22T19:24:46,887 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/e40927fb8fc54e6bb50e20f9bd852c0a is 50, key is test_row_0/C:col10/1732303486798/Put/seqid=0 2024-11-22T19:24:46,888 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:24:46,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-22T19:24:46,888 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, 
table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:24:46,888 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:24:46,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742538_1714 (size=12301) 2024-11-22T19:24:46,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:46,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303546931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:46,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-22T19:24:47,040 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:47,040 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-22T19:24:47,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:47,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
as already flushing 2024-11-22T19:24:47,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:47,041 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:47,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:47,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:47,135 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:47,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303547134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:47,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-22T19:24:47,192 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:47,193 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-22T19:24:47,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:47,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:47,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:47,193 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
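The pid=175 FlushTableProcedure and its pid=176 FlushRegionProcedure subprocedure above correspond to a client-requested flush of TestAcidGuarantees: the region server's FlushRegionCallable reports "NOT flushing ... as already flushing" and fails with "Unable to complete flush", so the master re-dispatches the remote procedure until it can run. Below is a minimal sketch of issuing such a flush from a client through the standard Admin API; the ZooKeeper quorum setting is an assumption for illustration only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "localhost"); // assumed quorum, adjust as needed
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Asks the master to flush every region of the table; the master runs a
                // flush-table procedure that fans out per-region flush procedures to the
                // region servers, like pid=175/176 in the log above.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }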
2024-11-22T19:24:47,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:47,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:47,194 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/b06968f7333148aa9c56b8f9b6e9321d as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/b06968f7333148aa9c56b8f9b6e9321d 2024-11-22T19:24:47,199 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 457088d1c2889b36850d00978a970867/B of 457088d1c2889b36850d00978a970867 into b06968f7333148aa9c56b8f9b6e9321d(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:47,199 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:47,199 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/B, priority=13, startTime=1732303486777; duration=0sec 2024-11-22T19:24:47,199 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:47,199 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:B 2024-11-22T19:24:47,199 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T19:24:47,200 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T19:24:47,200 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/C is initiating minor compaction (all files) 2024-11-22T19:24:47,200 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/C in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
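Before starting the C-store compaction above, the long-compaction thread selects its input files ("Exploring compaction algorithm has selected 3 files of size 34755 ... with 1 in ratio"): a candidate set is only admitted to a minor compaction when no file is disproportionately large relative to the rest. The snippet below is an illustrative sketch of that size-ratio test, not HBase's actual ExploringCompactionPolicy code; the 1.2 ratio mirrors the commonly documented default of hbase.hstore.compaction.ratio and is an assumption here.

    import java.util.List;

    public class CompactionRatioSketch {
        // A file set is "in ratio" when every file is no larger than
        // ratio * (combined size of the other files in the set).
        static boolean filesInRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Roughly the three C-store file sizes from the log (12.4 K, 9.5 K, 12.0 K).
            List<Long> sizes = List.of(12_700L, 9_700L, 12_300L);
            System.out.println(filesInRatio(sizes, 1.2)); // true: eligible as one selection
        }
    }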
2024-11-22T19:24:47,200 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/303c5b997acc4165ac28160853247fc1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/5bd929e324e040ab9932900286eff6b7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/806ef72bbff042398bac54a284449057] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=33.9 K 2024-11-22T19:24:47,201 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 303c5b997acc4165ac28160853247fc1, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732303483308 2024-11-22T19:24:47,201 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 5bd929e324e040ab9932900286eff6b7, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732303484433 2024-11-22T19:24:47,201 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 806ef72bbff042398bac54a284449057, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1732303486615 2024-11-22T19:24:47,214 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 457088d1c2889b36850d00978a970867#C#compaction#607 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:47,214 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/307f2367c9ad43088edc25f77e8a4f90 is 50, key is test_row_0/C:col10/1732303486626/Put/seqid=0 2024-11-22T19:24:47,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742539_1715 (size=12949) 2024-11-22T19:24:47,222 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 457088d1c2889b36850d00978a970867#A#compaction#603 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:47,222 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/418eb8289ee240b39fc64542bac092d9 is 175, key is test_row_0/A:col10/1732303486626/Put/seqid=0 2024-11-22T19:24:47,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742540_1716 (size=31903) 2024-11-22T19:24:47,233 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/418eb8289ee240b39fc64542bac092d9 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/418eb8289ee240b39fc64542bac092d9 2024-11-22T19:24:47,237 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 457088d1c2889b36850d00978a970867/A of 457088d1c2889b36850d00978a970867 into 418eb8289ee240b39fc64542bac092d9(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:47,237 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:47,237 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/A, priority=13, startTime=1732303486777; duration=0sec 2024-11-22T19:24:47,237 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:47,237 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:A 2024-11-22T19:24:47,293 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/e40927fb8fc54e6bb50e20f9bd852c0a 2024-11-22T19:24:47,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/55be4469b0fa4ac88889695280ad4970 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/55be4469b0fa4ac88889695280ad4970 2024-11-22T19:24:47,300 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/55be4469b0fa4ac88889695280ad4970, entries=200, sequenceid=290, filesize=39.0 K 
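Each compaction above logs its average throughput against the throughput controller's 50.00 MB/second ceiling ("0 active operations remaining, total limit is 50.00 MB/second"). That ceiling is configurable; the sketch below assumes the commonly documented compaction-throughput property names and values, which should be checked against the HBase release in use before relying on them.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed property names for the pressure-aware controller's bounds (bytes/second).
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            System.out.println(conf.get("hbase.hstore.compaction.throughput.higher.bound"));
        }
    }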
2024-11-22T19:24:47,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/74e19a8217954c37a20e4e5b6e2a76e6 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/74e19a8217954c37a20e4e5b6e2a76e6 2024-11-22T19:24:47,304 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/74e19a8217954c37a20e4e5b6e2a76e6, entries=150, sequenceid=290, filesize=12.0 K 2024-11-22T19:24:47,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/e40927fb8fc54e6bb50e20f9bd852c0a as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/e40927fb8fc54e6bb50e20f9bd852c0a 2024-11-22T19:24:47,308 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/e40927fb8fc54e6bb50e20f9bd852c0a, entries=150, sequenceid=290, filesize=12.0 K 2024-11-22T19:24:47,309 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 457088d1c2889b36850d00978a970867 in 510ms, sequenceid=290, compaction requested=false 2024-11-22T19:24:47,309 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:47,345 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:47,345 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-22T19:24:47,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
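The flush that finishes above ("Finished flush of dataSize ~154.31 KB ... in 510ms, sequenceid=290") is what clears the backpressure behind the earlier RegionTooBusyException warnings: writes are rejected once the region's memstore passes its blocking limit (512.0 K in this test), which is the memstore flush size multiplied by the block multiplier. A minimal configuration sketch with the standard property names follows; the small values mirror this test setup and are illustrative assumptions, not production recommendations.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Flush a region's memstore once it reaches 128 KB ...
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            // ... and block new writes once it reaches 4x that (512 KB), the
            // "Over memstore limit=512.0 K" threshold seen in this log.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("blocking limit (bytes): " + blockingLimit);
        }
    }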
2024-11-22T19:24:47,346 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2837): Flushing 457088d1c2889b36850d00978a970867 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-22T19:24:47,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=A 2024-11-22T19:24:47,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:47,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=B 2024-11-22T19:24:47,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:47,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=C 2024-11-22T19:24:47,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:47,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411229b3d5384efab4a0f8d7000c95893567a_457088d1c2889b36850d00978a970867 is 50, key is test_row_0/A:col10/1732303486820/Put/seqid=0 2024-11-22T19:24:47,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742541_1717 (size=12454) 2024-11-22T19:24:47,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 457088d1c2889b36850d00978a970867 2024-11-22T19:24:47,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:47,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-22T19:24:47,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:47,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303547496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:47,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:47,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303547598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:47,624 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/307f2367c9ad43088edc25f77e8a4f90 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/307f2367c9ad43088edc25f77e8a4f90 2024-11-22T19:24:47,628 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 457088d1c2889b36850d00978a970867/C of 457088d1c2889b36850d00978a970867 into 307f2367c9ad43088edc25f77e8a4f90(size=12.6 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
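While the flush and compactions are in flight, the test writer's puts keep bouncing off the region with RegionTooBusyException; as the RpcRetryingCallerImpl entry further down shows ("tries=7, retries=16, started=8137 ms ago"), the client absorbs these failures by retrying with backoff rather than surfacing them immediately. A minimal client-side sketch follows, assuming default single-put behavior; the retry and pause values are illustrative, not the test's actual settings.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create();
            // Retry/backoff knobs consulted by the client's retrying caller.
            conf.setInt("hbase.client.retries.number", 16);
            conf.setLong("hbase.client.pause", 100); // milliseconds between attempts
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                try {
                    table.put(put); // retried internally while the region reports it is too busy
                } catch (IOException e) {
                    // Only reached after the retries are exhausted (the cause may be a
                    // RegionTooBusyException, possibly wrapped); back off and try again later.
                    System.err.println("Put failed after retries: " + e);
                }
            }
        }
    }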
2024-11-22T19:24:47,628 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:47,628 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/C, priority=13, startTime=1732303486777; duration=0sec 2024-11-22T19:24:47,628 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:47,628 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:C 2024-11-22T19:24:47,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:47,765 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411229b3d5384efab4a0f8d7000c95893567a_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411229b3d5384efab4a0f8d7000c95893567a_457088d1c2889b36850d00978a970867 2024-11-22T19:24:47,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/1ecd5a0bf71a4782af27eaceeae1dcfe, store: [table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:47,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/1ecd5a0bf71a4782af27eaceeae1dcfe is 175, key is test_row_0/A:col10/1732303486820/Put/seqid=0 2024-11-22T19:24:47,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742542_1718 (size=31255) 2024-11-22T19:24:47,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:47,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303547802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:47,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-22T19:24:48,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:48,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303548105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:48,171 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=302, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/1ecd5a0bf71a4782af27eaceeae1dcfe 2024-11-22T19:24:48,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/64d0079a93b84d6c995c608d46337039 is 50, key is test_row_0/B:col10/1732303486820/Put/seqid=0 2024-11-22T19:24:48,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742543_1719 (size=12301) 2024-11-22T19:24:48,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:48,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32894 deadline: 1732303548531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:48,533 DEBUG [Thread-2774 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8137 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., hostname=a307a1377457,35917,1732303314657, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:24:48,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:48,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32890 deadline: 1732303548535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:48,537 DEBUG [Thread-2768 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8139 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., hostname=a307a1377457,35917,1732303314657, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:24:48,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:48,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32914 deadline: 1732303548559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:48,561 DEBUG [Thread-2770 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8167 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., hostname=a307a1377457,35917,1732303314657, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:24:48,567 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:48,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32904 deadline: 1732303548566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:48,567 DEBUG [Thread-2766 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8170 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., hostname=a307a1377457,35917,1732303314657, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T19:24:48,581 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/64d0079a93b84d6c995c608d46337039 2024-11-22T19:24:48,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/0762add977ac49f59abad778a8330cce is 50, key is test_row_0/C:col10/1732303486820/Put/seqid=0 2024-11-22T19:24:48,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742544_1720 (size=12301) 2024-11-22T19:24:48,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:48,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303548608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:48,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-22T19:24:48,996 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/0762add977ac49f59abad778a8330cce 2024-11-22T19:24:49,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/1ecd5a0bf71a4782af27eaceeae1dcfe as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/1ecd5a0bf71a4782af27eaceeae1dcfe 2024-11-22T19:24:49,003 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/1ecd5a0bf71a4782af27eaceeae1dcfe, entries=150, sequenceid=302, filesize=30.5 K 2024-11-22T19:24:49,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/64d0079a93b84d6c995c608d46337039 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/64d0079a93b84d6c995c608d46337039 2024-11-22T19:24:49,006 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/64d0079a93b84d6c995c608d46337039, entries=150, sequenceid=302, filesize=12.0 K 2024-11-22T19:24:49,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/0762add977ac49f59abad778a8330cce as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/0762add977ac49f59abad778a8330cce 2024-11-22T19:24:49,010 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/0762add977ac49f59abad778a8330cce, entries=150, sequenceid=302, filesize=12.0 K 2024-11-22T19:24:49,011 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 457088d1c2889b36850d00978a970867 in 1666ms, sequenceid=302, compaction requested=true 2024-11-22T19:24:49,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2538): Flush status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:49,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:49,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-11-22T19:24:49,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=176 2024-11-22T19:24:49,013 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-11-22T19:24:49,013 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1240 sec 2024-11-22T19:24:49,014 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees in 2.1270 sec 2024-11-22T19:24:49,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 457088d1c2889b36850d00978a970867 2024-11-22T19:24:49,616 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 457088d1c2889b36850d00978a970867 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-22T19:24:49,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=A 2024-11-22T19:24:49,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:49,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=B 2024-11-22T19:24:49,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:49,617 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=C 2024-11-22T19:24:49,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:49,623 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122a7559c560ac44165a205076a2a119e23_457088d1c2889b36850d00978a970867 is 50, key is test_row_0/A:col10/1732303487483/Put/seqid=0 2024-11-22T19:24:49,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742545_1721 (size=12454) 2024-11-22T19:24:49,627 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:49,629 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122a7559c560ac44165a205076a2a119e23_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a7559c560ac44165a205076a2a119e23_457088d1c2889b36850d00978a970867 2024-11-22T19:24:49,630 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/5404dd5deb9542819e5d35c073651387, store: [table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:49,631 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/5404dd5deb9542819e5d35c073651387 is 175, key is test_row_0/A:col10/1732303487483/Put/seqid=0 2024-11-22T19:24:49,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742546_1722 (size=31255) 2024-11-22T19:24:49,634 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=330, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/5404dd5deb9542819e5d35c073651387 2024-11-22T19:24:49,635 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:49,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303549633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:49,639 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/dca70a1e463b47dda3e36b5c1faa4d23 is 50, key is test_row_0/B:col10/1732303487483/Put/seqid=0 2024-11-22T19:24:49,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742547_1723 (size=12301) 2024-11-22T19:24:49,737 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:49,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303549736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:49,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:49,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303549939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:50,043 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/dca70a1e463b47dda3e36b5c1faa4d23 2024-11-22T19:24:50,049 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/4d8a648ef9264cf4a51bdcbd2e67667d is 50, key is test_row_0/C:col10/1732303487483/Put/seqid=0 2024-11-22T19:24:50,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742548_1724 (size=12301) 2024-11-22T19:24:50,244 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:50,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303550242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:50,453 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/4d8a648ef9264cf4a51bdcbd2e67667d 2024-11-22T19:24:50,457 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/5404dd5deb9542819e5d35c073651387 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/5404dd5deb9542819e5d35c073651387 2024-11-22T19:24:50,460 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/5404dd5deb9542819e5d35c073651387, entries=150, sequenceid=330, filesize=30.5 K 2024-11-22T19:24:50,461 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/dca70a1e463b47dda3e36b5c1faa4d23 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/dca70a1e463b47dda3e36b5c1faa4d23 2024-11-22T19:24:50,464 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/dca70a1e463b47dda3e36b5c1faa4d23, entries=150, sequenceid=330, filesize=12.0 K 2024-11-22T19:24:50,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/4d8a648ef9264cf4a51bdcbd2e67667d as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/4d8a648ef9264cf4a51bdcbd2e67667d 2024-11-22T19:24:50,467 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/4d8a648ef9264cf4a51bdcbd2e67667d, entries=150, sequenceid=330, filesize=12.0 K 2024-11-22T19:24:50,467 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 457088d1c2889b36850d00978a970867 in 851ms, sequenceid=330, compaction requested=true 2024-11-22T19:24:50,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:50,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:24:50,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:50,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:24:50,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:50,468 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:24:50,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:24:50,468 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:24:50,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:50,469 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 134362 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:24:50,469 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:24:50,469 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/A is initiating minor compaction (all files) 2024-11-22T19:24:50,469 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/B is initiating minor compaction (all files) 2024-11-22T19:24:50,469 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/B in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
2024-11-22T19:24:50,469 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/A in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:50,469 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/b06968f7333148aa9c56b8f9b6e9321d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/74e19a8217954c37a20e4e5b6e2a76e6, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/64d0079a93b84d6c995c608d46337039, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/dca70a1e463b47dda3e36b5c1faa4d23] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=48.7 K 2024-11-22T19:24:50,469 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/418eb8289ee240b39fc64542bac092d9, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/55be4469b0fa4ac88889695280ad4970, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/1ecd5a0bf71a4782af27eaceeae1dcfe, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/5404dd5deb9542819e5d35c073651387] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=131.2 K 2024-11-22T19:24:50,469 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:50,469 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
files: [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/418eb8289ee240b39fc64542bac092d9, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/55be4469b0fa4ac88889695280ad4970, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/1ecd5a0bf71a4782af27eaceeae1dcfe, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/5404dd5deb9542819e5d35c073651387] 2024-11-22T19:24:50,469 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting b06968f7333148aa9c56b8f9b6e9321d, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1732303486615 2024-11-22T19:24:50,470 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 418eb8289ee240b39fc64542bac092d9, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1732303486615 2024-11-22T19:24:50,470 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 74e19a8217954c37a20e4e5b6e2a76e6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732303486681 2024-11-22T19:24:50,470 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55be4469b0fa4ac88889695280ad4970, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732303486681 2024-11-22T19:24:50,470 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 64d0079a93b84d6c995c608d46337039, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1732303486816 2024-11-22T19:24:50,470 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ecd5a0bf71a4782af27eaceeae1dcfe, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1732303486816 2024-11-22T19:24:50,470 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting dca70a1e463b47dda3e36b5c1faa4d23, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732303487483 2024-11-22T19:24:50,470 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5404dd5deb9542819e5d35c073651387, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732303487483 2024-11-22T19:24:50,476 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:50,478 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122485929f8abfb41f59d3fa3d16b9a6d48_457088d1c2889b36850d00978a970867 store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:50,479 INFO 
[RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 457088d1c2889b36850d00978a970867#B#compaction#615 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:50,480 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/7365ab6019ba418882b9b7090cb95840 is 50, key is test_row_0/B:col10/1732303487483/Put/seqid=0 2024-11-22T19:24:50,480 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122485929f8abfb41f59d3fa3d16b9a6d48_457088d1c2889b36850d00978a970867, store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:50,480 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122485929f8abfb41f59d3fa3d16b9a6d48_457088d1c2889b36850d00978a970867 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:50,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742549_1725 (size=13085) 2024-11-22T19:24:50,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742550_1726 (size=4469) 2024-11-22T19:24:50,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 457088d1c2889b36850d00978a970867 2024-11-22T19:24:50,751 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 457088d1c2889b36850d00978a970867 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T19:24:50,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=A 2024-11-22T19:24:50,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:50,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=B 2024-11-22T19:24:50,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:50,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=C 2024-11-22T19:24:50,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:50,757 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122b3b9c679ac3443bb9c4d5c63dfaf7603_457088d1c2889b36850d00978a970867 is 50, key is test_row_0/A:col10/1732303490751/Put/seqid=0 2024-11-22T19:24:50,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:41091 is added to blk_1073742551_1727 (size=14994) 2024-11-22T19:24:50,761 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:50,763 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122b3b9c679ac3443bb9c4d5c63dfaf7603_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122b3b9c679ac3443bb9c4d5c63dfaf7603_457088d1c2889b36850d00978a970867 2024-11-22T19:24:50,764 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/75f6d419134d4a88b5a48e085ce0e3a7, store: [table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:50,765 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/75f6d419134d4a88b5a48e085ce0e3a7 is 175, key is test_row_0/A:col10/1732303490751/Put/seqid=0 2024-11-22T19:24:50,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742552_1728 (size=39949) 2024-11-22T19:24:50,768 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=341, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/75f6d419134d4a88b5a48e085ce0e3a7 2024-11-22T19:24:50,773 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/28db7323a9434b0cbc337cb1045b3fc0 is 50, key is test_row_0/B:col10/1732303490751/Put/seqid=0 2024-11-22T19:24:50,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742553_1729 (size=12301) 2024-11-22T19:24:50,776 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/28db7323a9434b0cbc337cb1045b3fc0 2024-11-22T19:24:50,781 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/57475e5194ce40d6b700e58a22bb127d is 50, key is test_row_0/C:col10/1732303490751/Put/seqid=0 2024-11-22T19:24:50,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742554_1730 (size=12301) 
2024-11-22T19:24:50,829 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:50,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303550827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:50,885 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 457088d1c2889b36850d00978a970867#A#compaction#614 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:50,886 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/773fa8b7dfb54bb39bbed235bc4afc76 is 175, key is test_row_0/A:col10/1732303487483/Put/seqid=0 2024-11-22T19:24:50,888 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/7365ab6019ba418882b9b7090cb95840 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/7365ab6019ba418882b9b7090cb95840 2024-11-22T19:24:50,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742555_1731 (size=32039) 2024-11-22T19:24:50,892 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 457088d1c2889b36850d00978a970867/B of 457088d1c2889b36850d00978a970867 into 7365ab6019ba418882b9b7090cb95840(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:50,892 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:50,892 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/B, priority=12, startTime=1732303490468; duration=0sec 2024-11-22T19:24:50,892 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:50,892 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:B 2024-11-22T19:24:50,892 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:24:50,893 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:24:50,893 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/C is initiating minor compaction (all files) 2024-11-22T19:24:50,893 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/C in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
2024-11-22T19:24:50,893 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/773fa8b7dfb54bb39bbed235bc4afc76 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/773fa8b7dfb54bb39bbed235bc4afc76 2024-11-22T19:24:50,893 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/307f2367c9ad43088edc25f77e8a4f90, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/e40927fb8fc54e6bb50e20f9bd852c0a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/0762add977ac49f59abad778a8330cce, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/4d8a648ef9264cf4a51bdcbd2e67667d] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=48.7 K 2024-11-22T19:24:50,894 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 307f2367c9ad43088edc25f77e8a4f90, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1732303486615 2024-11-22T19:24:50,894 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting e40927fb8fc54e6bb50e20f9bd852c0a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732303486681 2024-11-22T19:24:50,895 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 0762add977ac49f59abad778a8330cce, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1732303486816 2024-11-22T19:24:50,895 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d8a648ef9264cf4a51bdcbd2e67667d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732303487483 2024-11-22T19:24:50,897 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 457088d1c2889b36850d00978a970867/A of 457088d1c2889b36850d00978a970867 into 773fa8b7dfb54bb39bbed235bc4afc76(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:24:50,897 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:50,897 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/A, priority=12, startTime=1732303490468; duration=0sec 2024-11-22T19:24:50,897 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:50,897 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:A 2024-11-22T19:24:50,901 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 457088d1c2889b36850d00978a970867#C#compaction#619 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:50,901 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/c4a99886c3b047e2a66b975eb781726f is 50, key is test_row_0/C:col10/1732303487483/Put/seqid=0 2024-11-22T19:24:50,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742556_1732 (size=13085) 2024-11-22T19:24:50,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:50,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303550930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:50,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-22T19:24:50,992 INFO [Thread-2776 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-11-22T19:24:50,993 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:24:50,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees 2024-11-22T19:24:50,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-22T19:24:50,995 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:24:50,995 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:24:50,996 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:24:51,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-22T19:24:51,134 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:51,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303551133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:51,147 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:51,147 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-22T19:24:51,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:51,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:51,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:51,148 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:51,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:51,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:51,184 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/57475e5194ce40d6b700e58a22bb127d 2024-11-22T19:24:51,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/75f6d419134d4a88b5a48e085ce0e3a7 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/75f6d419134d4a88b5a48e085ce0e3a7 2024-11-22T19:24:51,191 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/75f6d419134d4a88b5a48e085ce0e3a7, entries=200, sequenceid=341, filesize=39.0 K 2024-11-22T19:24:51,192 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/28db7323a9434b0cbc337cb1045b3fc0 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/28db7323a9434b0cbc337cb1045b3fc0 2024-11-22T19:24:51,194 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/28db7323a9434b0cbc337cb1045b3fc0, entries=150, sequenceid=341, filesize=12.0 K 2024-11-22T19:24:51,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/57475e5194ce40d6b700e58a22bb127d as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/57475e5194ce40d6b700e58a22bb127d 2024-11-22T19:24:51,197 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/57475e5194ce40d6b700e58a22bb127d, entries=150, sequenceid=341, filesize=12.0 K 2024-11-22T19:24:51,198 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 457088d1c2889b36850d00978a970867 in 447ms, sequenceid=341, compaction requested=false 2024-11-22T19:24:51,198 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:51,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-22T19:24:51,300 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:51,300 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-22T19:24:51,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:51,300 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2837): Flushing 457088d1c2889b36850d00978a970867 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T19:24:51,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=A 2024-11-22T19:24:51,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:51,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=B 2024-11-22T19:24:51,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:51,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=C 2024-11-22T19:24:51,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:51,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122c091f514fe5249d1b3444b41b18e2e01_457088d1c2889b36850d00978a970867 is 50, key is test_row_0/A:col10/1732303490823/Put/seqid=0 2024-11-22T19:24:51,309 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/c4a99886c3b047e2a66b975eb781726f as 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/c4a99886c3b047e2a66b975eb781726f 2024-11-22T19:24:51,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742557_1733 (size=12454) 2024-11-22T19:24:51,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:51,313 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 457088d1c2889b36850d00978a970867/C of 457088d1c2889b36850d00978a970867 into c4a99886c3b047e2a66b975eb781726f(size=12.8 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:51,313 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:51,313 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/C, priority=12, startTime=1732303490468; duration=0sec 2024-11-22T19:24:51,313 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:51,313 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:C 2024-11-22T19:24:51,314 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122c091f514fe5249d1b3444b41b18e2e01_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122c091f514fe5249d1b3444b41b18e2e01_457088d1c2889b36850d00978a970867 2024-11-22T19:24:51,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/6252ea920828495f9f23e5b35e2267c7, store: [table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:51,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/6252ea920828495f9f23e5b35e2267c7 is 175, key is test_row_0/A:col10/1732303490823/Put/seqid=0 2024-11-22T19:24:51,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is 
added to blk_1073742558_1734 (size=31255) 2024-11-22T19:24:51,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 457088d1c2889b36850d00978a970867 2024-11-22T19:24:51,439 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:51,457 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:51,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303551456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:51,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:51,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303551558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:51,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-22T19:24:51,719 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=368, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/6252ea920828495f9f23e5b35e2267c7 2024-11-22T19:24:51,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/cb6c03f1a5c8476d945916b275512385 is 50, key is test_row_0/B:col10/1732303490823/Put/seqid=0 2024-11-22T19:24:51,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742559_1735 (size=12301) 2024-11-22T19:24:51,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:51,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303551761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:52,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:52,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303552063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:52,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-22T19:24:52,129 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/cb6c03f1a5c8476d945916b275512385 2024-11-22T19:24:52,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/d750243786c04bd3b87221aea1bfea1d is 50, key is test_row_0/C:col10/1732303490823/Put/seqid=0 2024-11-22T19:24:52,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742560_1736 (size=12301) 2024-11-22T19:24:52,140 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/d750243786c04bd3b87221aea1bfea1d 2024-11-22T19:24:52,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/6252ea920828495f9f23e5b35e2267c7 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/6252ea920828495f9f23e5b35e2267c7 2024-11-22T19:24:52,146 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/6252ea920828495f9f23e5b35e2267c7, entries=150, sequenceid=368, filesize=30.5 K 2024-11-22T19:24:52,147 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/cb6c03f1a5c8476d945916b275512385 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/cb6c03f1a5c8476d945916b275512385 2024-11-22T19:24:52,150 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/cb6c03f1a5c8476d945916b275512385, entries=150, sequenceid=368, filesize=12.0 K 2024-11-22T19:24:52,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/d750243786c04bd3b87221aea1bfea1d as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/d750243786c04bd3b87221aea1bfea1d 2024-11-22T19:24:52,153 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/d750243786c04bd3b87221aea1bfea1d, entries=150, sequenceid=368, filesize=12.0 K 2024-11-22T19:24:52,154 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 457088d1c2889b36850d00978a970867 in 854ms, sequenceid=368, compaction requested=true 2024-11-22T19:24:52,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2538): Flush status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:52,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
2024-11-22T19:24:52,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-11-22T19:24:52,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=178 2024-11-22T19:24:52,156 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-11-22T19:24:52,156 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1590 sec 2024-11-22T19:24:52,157 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees in 1.1630 sec 2024-11-22T19:24:52,402 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T19:24:52,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 457088d1c2889b36850d00978a970867 2024-11-22T19:24:52,569 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 457088d1c2889b36850d00978a970867 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-22T19:24:52,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=A 2024-11-22T19:24:52,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:52,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=B 2024-11-22T19:24:52,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:52,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=C 2024-11-22T19:24:52,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:52,575 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122cdca6c4d708e41d39fbefd693aa63bd8_457088d1c2889b36850d00978a970867 is 50, key is test_row_0/A:col10/1732303491455/Put/seqid=0 2024-11-22T19:24:52,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742561_1737 (size=14994) 2024-11-22T19:24:52,644 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:52,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303552641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:52,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:52,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303552745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:52,950 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:52,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303552948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:52,986 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:52,988 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122cdca6c4d708e41d39fbefd693aa63bd8_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122cdca6c4d708e41d39fbefd693aa63bd8_457088d1c2889b36850d00978a970867 2024-11-22T19:24:52,989 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/856a3acfffaf42da90928019d1ebe3b4, store: [table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:52,989 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/856a3acfffaf42da90928019d1ebe3b4 is 175, key is test_row_0/A:col10/1732303491455/Put/seqid=0 2024-11-22T19:24:52,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742562_1738 (size=39949) 2024-11-22T19:24:53,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=177 2024-11-22T19:24:53,098 INFO [Thread-2776 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 177 completed 2024-11-22T19:24:53,099 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T19:24:53,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees 2024-11-22T19:24:53,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-22T19:24:53,101 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T19:24:53,101 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T19:24:53,101 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=180, ppid=179, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T19:24:53,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-22T19:24:53,244 DEBUG [Thread-2785 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68c2838a to 127.0.0.1:57120 2024-11-22T19:24:53,244 DEBUG [Thread-2785 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:24:53,244 DEBUG [Thread-2783 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5cfdf76c to 127.0.0.1:57120 2024-11-22T19:24:53,244 DEBUG [Thread-2783 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:24:53,245 DEBUG [Thread-2781 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3a3b66d3 to 127.0.0.1:57120 2024-11-22T19:24:53,246 DEBUG [Thread-2781 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:24:53,246 DEBUG [Thread-2779 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x150e08ed to 127.0.0.1:57120 2024-11-22T19:24:53,246 DEBUG [Thread-2779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:24:53,246 DEBUG [Thread-2777 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3f6a59e4 to 127.0.0.1:57120 2024-11-22T19:24:53,246 DEBUG [Thread-2777 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:24:53,252 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:53,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303553252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:53,253 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:53,253 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-22T19:24:53,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:53,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:53,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:53,253 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:53,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:53,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:53,393 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=381, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/856a3acfffaf42da90928019d1ebe3b4 2024-11-22T19:24:53,398 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/8ecade6fa29f4c34b360cede996192e1 is 50, key is test_row_0/B:col10/1732303491455/Put/seqid=0 2024-11-22T19:24:53,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742563_1739 (size=12301) 2024-11-22T19:24:53,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-22T19:24:53,405 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:53,405 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-22T19:24:53,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:53,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:53,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:53,406 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:53,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:53,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:53,557 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:53,557 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-22T19:24:53,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:53,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:53,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:53,558 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:53,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:53,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:53,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-22T19:24:53,709 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:53,710 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-22T19:24:53,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:53,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:53,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:53,710 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:53,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:53,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:53,754 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T19:24:53,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] ipc.CallRunner(138): callId: 284 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732303553754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 2024-11-22T19:24:53,801 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/8ecade6fa29f4c34b360cede996192e1 2024-11-22T19:24:53,806 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/08b5df0af50d4e6d864595bdc449984b is 50, key is test_row_0/C:col10/1732303491455/Put/seqid=0 2024-11-22T19:24:53,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742564_1740 (size=12301) 2024-11-22T19:24:53,862 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:53,862 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-22T19:24:53,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:53,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:53,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:53,862 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:53,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:53,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:54,014 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:54,014 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-22T19:24:54,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:54,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:54,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:54,015 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:54,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:54,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:54,166 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:54,167 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-22T19:24:54,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:54,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:54,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:54,167 ERROR [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:54,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:54,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T19:24:54,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-22T19:24:54,209 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/08b5df0af50d4e6d864595bdc449984b 2024-11-22T19:24:54,213 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/856a3acfffaf42da90928019d1ebe3b4 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/856a3acfffaf42da90928019d1ebe3b4 2024-11-22T19:24:54,215 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/856a3acfffaf42da90928019d1ebe3b4, entries=200, sequenceid=381, filesize=39.0 K 2024-11-22T19:24:54,215 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/8ecade6fa29f4c34b360cede996192e1 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/8ecade6fa29f4c34b360cede996192e1 2024-11-22T19:24:54,217 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/8ecade6fa29f4c34b360cede996192e1, entries=150, sequenceid=381, filesize=12.0 K 2024-11-22T19:24:54,218 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/08b5df0af50d4e6d864595bdc449984b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/08b5df0af50d4e6d864595bdc449984b 2024-11-22T19:24:54,220 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/08b5df0af50d4e6d864595bdc449984b, entries=150, sequenceid=381, filesize=12.0 K 2024-11-22T19:24:54,221 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 457088d1c2889b36850d00978a970867 in 1652ms, sequenceid=381, compaction requested=true 2024-11-22T19:24:54,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:54,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T19:24:54,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:54,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T19:24:54,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:54,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 457088d1c2889b36850d00978a970867:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T19:24:54,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:54,221 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:24:54,221 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:24:54,222 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 143192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:24:54,222 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 
4 files of size 49988 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:24:54,222 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/A is initiating minor compaction (all files) 2024-11-22T19:24:54,222 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/B is initiating minor compaction (all files) 2024-11-22T19:24:54,222 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/A in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:54,222 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/B in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:54,222 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/773fa8b7dfb54bb39bbed235bc4afc76, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/75f6d419134d4a88b5a48e085ce0e3a7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/6252ea920828495f9f23e5b35e2267c7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/856a3acfffaf42da90928019d1ebe3b4] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=139.8 K 2024-11-22T19:24:54,222 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/7365ab6019ba418882b9b7090cb95840, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/28db7323a9434b0cbc337cb1045b3fc0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/cb6c03f1a5c8476d945916b275512385, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/8ecade6fa29f4c34b360cede996192e1] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=48.8 K 2024-11-22T19:24:54,222 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
2024-11-22T19:24:54,222 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. files: [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/773fa8b7dfb54bb39bbed235bc4afc76, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/75f6d419134d4a88b5a48e085ce0e3a7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/6252ea920828495f9f23e5b35e2267c7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/856a3acfffaf42da90928019d1ebe3b4] 2024-11-22T19:24:54,222 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 7365ab6019ba418882b9b7090cb95840, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732303487483 2024-11-22T19:24:54,222 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 773fa8b7dfb54bb39bbed235bc4afc76, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732303487483 2024-11-22T19:24:54,222 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 28db7323a9434b0cbc337cb1045b3fc0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1732303489632 2024-11-22T19:24:54,222 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 75f6d419134d4a88b5a48e085ce0e3a7, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1732303489629 2024-11-22T19:24:54,223 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting cb6c03f1a5c8476d945916b275512385, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732303490797 2024-11-22T19:24:54,223 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6252ea920828495f9f23e5b35e2267c7, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732303490797 2024-11-22T19:24:54,223 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ecade6fa29f4c34b360cede996192e1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1732303491452 2024-11-22T19:24:54,223 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 856a3acfffaf42da90928019d1ebe3b4, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1732303491451 2024-11-22T19:24:54,228 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:54,228 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
457088d1c2889b36850d00978a970867#B#compaction#626 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:54,229 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/cc6be1d51cb34dbb8fe182e01aa27ddf is 50, key is test_row_0/B:col10/1732303491455/Put/seqid=0 2024-11-22T19:24:54,229 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411220a18b3b04257458da7f704146c155aae_457088d1c2889b36850d00978a970867 store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:54,231 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411220a18b3b04257458da7f704146c155aae_457088d1c2889b36850d00978a970867, store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:54,231 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411220a18b3b04257458da7f704146c155aae_457088d1c2889b36850d00978a970867 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:54,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742565_1741 (size=13221) 2024-11-22T19:24:54,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742566_1742 (size=4469) 2024-11-22T19:24:54,238 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 457088d1c2889b36850d00978a970867#A#compaction#627 average throughput is 2.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:54,239 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/7437a446b3ad46e7b25f8feb24b1d1f6 is 175, key is test_row_0/A:col10/1732303491455/Put/seqid=0 2024-11-22T19:24:54,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742567_1743 (size=32175) 2024-11-22T19:24:54,319 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:54,319 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-22T19:24:54,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:54,319 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2837): Flushing 457088d1c2889b36850d00978a970867 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-22T19:24:54,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=A 2024-11-22T19:24:54,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:54,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=B 2024-11-22T19:24:54,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:54,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=C 2024-11-22T19:24:54,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:54,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411221ba98400790d475e919ec5153732c97f_457088d1c2889b36850d00978a970867 is 50, key is test_row_0/A:col10/1732303492633/Put/seqid=0 2024-11-22T19:24:54,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742568_1744 (size=12454) 2024-11-22T19:24:54,637 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/cc6be1d51cb34dbb8fe182e01aa27ddf as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/cc6be1d51cb34dbb8fe182e01aa27ddf 2024-11-22T19:24:54,640 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 457088d1c2889b36850d00978a970867/B of 457088d1c2889b36850d00978a970867 into cc6be1d51cb34dbb8fe182e01aa27ddf(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:54,640 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:54,640 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/B, priority=12, startTime=1732303494221; duration=0sec 2024-11-22T19:24:54,640 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T19:24:54,640 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:B 2024-11-22T19:24:54,640 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T19:24:54,641 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49988 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T19:24:54,641 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1540): 457088d1c2889b36850d00978a970867/C is initiating minor compaction (all files) 2024-11-22T19:24:54,641 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 457088d1c2889b36850d00978a970867/C in TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
2024-11-22T19:24:54,641 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/c4a99886c3b047e2a66b975eb781726f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/57475e5194ce40d6b700e58a22bb127d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/d750243786c04bd3b87221aea1bfea1d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/08b5df0af50d4e6d864595bdc449984b] into tmpdir=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp, totalSize=48.8 K 2024-11-22T19:24:54,641 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting c4a99886c3b047e2a66b975eb781726f, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732303487483 2024-11-22T19:24:54,641 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 57475e5194ce40d6b700e58a22bb127d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1732303489632 2024-11-22T19:24:54,641 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting d750243786c04bd3b87221aea1bfea1d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732303490797 2024-11-22T19:24:54,642 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] compactions.Compactor(224): Compacting 08b5df0af50d4e6d864595bdc449984b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1732303491452 2024-11-22T19:24:54,644 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/7437a446b3ad46e7b25f8feb24b1d1f6 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/7437a446b3ad46e7b25f8feb24b1d1f6 2024-11-22T19:24:54,647 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 457088d1c2889b36850d00978a970867/A of 457088d1c2889b36850d00978a970867 into 7437a446b3ad46e7b25f8feb24b1d1f6(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T19:24:54,647 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:54,647 INFO [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/A, priority=12, startTime=1732303494221; duration=0sec 2024-11-22T19:24:54,647 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:54,647 DEBUG [RS:0;a307a1377457:35917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:A 2024-11-22T19:24:54,647 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 457088d1c2889b36850d00978a970867#C#compaction#629 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T19:24:54,648 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/cc97d332e0af41bb8e7a6dfc8fd0320b is 50, key is test_row_0/C:col10/1732303491455/Put/seqid=0 2024-11-22T19:24:54,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742569_1745 (size=13221) 2024-11-22T19:24:54,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:54,730 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411221ba98400790d475e919ec5153732c97f_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411221ba98400790d475e919ec5153732c97f_457088d1c2889b36850d00978a970867 2024-11-22T19:24:54,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/d084b95d20f94ba584e30be4f82e5499, store: [table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:54,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/d084b95d20f94ba584e30be4f82e5499 is 175, key is 
test_row_0/A:col10/1732303492633/Put/seqid=0 2024-11-22T19:24:54,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742570_1746 (size=31255) 2024-11-22T19:24:54,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35917 {}] regionserver.HRegion(8581): Flush requested on 457088d1c2889b36850d00978a970867 2024-11-22T19:24:54,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. as already flushing 2024-11-22T19:24:54,763 DEBUG [Thread-2772 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11030ef5 to 127.0.0.1:57120 2024-11-22T19:24:54,763 DEBUG [Thread-2772 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:24:55,054 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/cc97d332e0af41bb8e7a6dfc8fd0320b as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/cc97d332e0af41bb8e7a6dfc8fd0320b 2024-11-22T19:24:55,057 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 457088d1c2889b36850d00978a970867/C of 457088d1c2889b36850d00978a970867 into cc97d332e0af41bb8e7a6dfc8fd0320b(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T19:24:55,057 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:55,057 INFO [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867., storeName=457088d1c2889b36850d00978a970867/C, priority=12, startTime=1732303494221; duration=0sec 2024-11-22T19:24:55,057 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T19:24:55,057 DEBUG [RS:0;a307a1377457:35917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 457088d1c2889b36850d00978a970867:C 2024-11-22T19:24:55,135 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=405, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/d084b95d20f94ba584e30be4f82e5499 2024-11-22T19:24:55,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/b2fade230d8f46b9a2a28d8709d84980 is 50, key is test_row_0/B:col10/1732303492633/Put/seqid=0 2024-11-22T19:24:55,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is 
added to blk_1073742571_1747 (size=12301) 2024-11-22T19:24:55,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-22T19:24:55,543 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/b2fade230d8f46b9a2a28d8709d84980 2024-11-22T19:24:55,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/78ca15ab94894cc49ff79cb256326497 is 50, key is test_row_0/C:col10/1732303492633/Put/seqid=0 2024-11-22T19:24:55,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742572_1748 (size=12301) 2024-11-22T19:24:55,952 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/78ca15ab94894cc49ff79cb256326497 2024-11-22T19:24:55,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/d084b95d20f94ba584e30be4f82e5499 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/d084b95d20f94ba584e30be4f82e5499 2024-11-22T19:24:55,958 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/d084b95d20f94ba584e30be4f82e5499, entries=150, sequenceid=405, filesize=30.5 K 2024-11-22T19:24:55,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/b2fade230d8f46b9a2a28d8709d84980 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/b2fade230d8f46b9a2a28d8709d84980 2024-11-22T19:24:55,962 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/b2fade230d8f46b9a2a28d8709d84980, entries=150, sequenceid=405, filesize=12.0 K 
2024-11-22T19:24:55,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/78ca15ab94894cc49ff79cb256326497 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/78ca15ab94894cc49ff79cb256326497 2024-11-22T19:24:55,965 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/78ca15ab94894cc49ff79cb256326497, entries=150, sequenceid=405, filesize=12.0 K 2024-11-22T19:24:55,965 INFO [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=6.71 KB/6870 for 457088d1c2889b36850d00978a970867 in 1646ms, sequenceid=405, compaction requested=false 2024-11-22T19:24:55,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2538): Flush status journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:24:55,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:55,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a307a1377457:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=180 2024-11-22T19:24:55,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster(4106): Remote procedure done, pid=180 2024-11-22T19:24:55,967 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=180, resume processing ppid=179 2024-11-22T19:24:55,967 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, ppid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8650 sec 2024-11-22T19:24:55,968 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees in 2.8670 sec 2024-11-22T19:24:57,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-22T19:24:57,205 INFO [Thread-2776 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-11-22T19:24:58,543 DEBUG [Thread-2774 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x69abefea to 127.0.0.1:57120 2024-11-22T19:24:58,543 DEBUG [Thread-2774 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:24:58,585 DEBUG [Thread-2768 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x05bc9c3e to 127.0.0.1:57120 2024-11-22T19:24:58,585 DEBUG [Thread-2768 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:24:58,630 DEBUG [Thread-2766 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x2ac53e79 to 127.0.0.1:57120 2024-11-22T19:24:58,630 DEBUG [Thread-2766 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:24:58,657 DEBUG [Thread-2770 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7181df3b to 127.0.0.1:57120 2024-11-22T19:24:58,658 DEBUG [Thread-2770 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:24:58,658 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-22T19:24:58,658 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 36 2024-11-22T19:24:58,658 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 33 2024-11-22T19:24:58,658 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 32 2024-11-22T19:24:58,658 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 189 2024-11-22T19:24:58,658 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 32 2024-11-22T19:24:58,658 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-22T19:24:58,658 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7185 2024-11-22T19:24:58,658 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6781 2024-11-22T19:24:58,658 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6499 2024-11-22T19:24:58,658 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7203 2024-11-22T19:24:58,658 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6798 2024-11-22T19:24:58,658 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-22T19:24:58,658 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-22T19:24:58,658 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5765d46a to 127.0.0.1:57120 2024-11-22T19:24:58,658 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:24:58,659 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-22T19:24:58,659 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-22T19:24:58,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=181, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-22T19:24:58,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-22T19:24:58,661 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303498661"}]},"ts":"1732303498661"} 2024-11-22T19:24:58,662 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-22T19:24:58,664 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-22T19:24:58,664 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=182, ppid=181, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-22T19:24:58,665 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=183, 
ppid=182, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=457088d1c2889b36850d00978a970867, UNASSIGN}] 2024-11-22T19:24:58,665 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=183, ppid=182, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=457088d1c2889b36850d00978a970867, UNASSIGN 2024-11-22T19:24:58,666 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=183 updating hbase:meta row=457088d1c2889b36850d00978a970867, regionState=CLOSING, regionLocation=a307a1377457,35917,1732303314657 2024-11-22T19:24:58,666 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T19:24:58,666 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE; CloseRegionProcedure 457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657}] 2024-11-22T19:24:58,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-22T19:24:58,817 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a307a1377457,35917,1732303314657 2024-11-22T19:24:58,818 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] handler.UnassignRegionHandler(124): Close 457088d1c2889b36850d00978a970867 2024-11-22T19:24:58,818 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-22T19:24:58,818 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1681): Closing 457088d1c2889b36850d00978a970867, disabling compactions & flushes 2024-11-22T19:24:58,818 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:58,818 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:24:58,818 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. after waiting 0 ms 2024-11-22T19:24:58,818 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 
2024-11-22T19:24:58,818 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(2837): Flushing 457088d1c2889b36850d00978a970867 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-22T19:24:58,818 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=A 2024-11-22T19:24:58,819 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:58,819 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=B 2024-11-22T19:24:58,819 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:58,819 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 457088d1c2889b36850d00978a970867, store=C 2024-11-22T19:24:58,819 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T19:24:58,823 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122398bbdb8b9b14fe189c09c5909ceff12_457088d1c2889b36850d00978a970867 is 50, key is test_row_0/A:col10/1732303498542/Put/seqid=0 2024-11-22T19:24:58,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742573_1749 (size=12454) 2024-11-22T19:24:58,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-22T19:24:59,227 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T19:24:59,230 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122398bbdb8b9b14fe189c09c5909ceff12_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122398bbdb8b9b14fe189c09c5909ceff12_457088d1c2889b36850d00978a970867 2024-11-22T19:24:59,231 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/25e624e1f0a143d2901e8f0441794199, store: [table=TestAcidGuarantees family=A region=457088d1c2889b36850d00978a970867] 2024-11-22T19:24:59,231 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/25e624e1f0a143d2901e8f0441794199 is 175, key is test_row_0/A:col10/1732303498542/Put/seqid=0 2024-11-22T19:24:59,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742574_1750 (size=31255) 2024-11-22T19:24:59,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-22T19:24:59,635 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=416, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/25e624e1f0a143d2901e8f0441794199 2024-11-22T19:24:59,640 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/786f94c9a4204936a52b13ebaa657d80 is 50, key is test_row_0/B:col10/1732303498542/Put/seqid=0 2024-11-22T19:24:59,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742575_1751 (size=12301) 2024-11-22T19:24:59,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-22T19:25:00,044 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/786f94c9a4204936a52b13ebaa657d80 2024-11-22T19:25:00,049 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/7790caf830b9467295f8797e844a90b9 is 50, key is test_row_0/C:col10/1732303498542/Put/seqid=0 2024-11-22T19:25:00,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742576_1752 (size=12301) 2024-11-22T19:25:00,452 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=416 (bloomFilter=true), 
to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/7790caf830b9467295f8797e844a90b9 2024-11-22T19:25:00,456 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/A/25e624e1f0a143d2901e8f0441794199 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/25e624e1f0a143d2901e8f0441794199 2024-11-22T19:25:00,458 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/25e624e1f0a143d2901e8f0441794199, entries=150, sequenceid=416, filesize=30.5 K 2024-11-22T19:25:00,459 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/B/786f94c9a4204936a52b13ebaa657d80 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/786f94c9a4204936a52b13ebaa657d80 2024-11-22T19:25:00,461 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/786f94c9a4204936a52b13ebaa657d80, entries=150, sequenceid=416, filesize=12.0 K 2024-11-22T19:25:00,461 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/.tmp/C/7790caf830b9467295f8797e844a90b9 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/7790caf830b9467295f8797e844a90b9 2024-11-22T19:25:00,463 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/7790caf830b9467295f8797e844a90b9, entries=150, sequenceid=416, filesize=12.0 K 2024-11-22T19:25:00,464 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 457088d1c2889b36850d00978a970867 in 1646ms, sequenceid=416, compaction requested=true 2024-11-22T19:25:00,464 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/b8180b41d77b41d0949b2bb131963b3c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/139a876358cf4fad85496d87ce7d5651, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/e390eb3dc56b4c2a9f7148c36aaf4b0c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/a8b5475a989f4207baa694b096958f3c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/b8820c493d324c0d8d30468cf4f68cd4, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/ef2603f59124464c882e074445a1388a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/c3f7234f6c9f408789191ceb4d7fe44c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/d08714a47e9348728596084aaf096328, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/b00295514887438fa396758c7496d90f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/2d56608fc61b4f2fa76a6521c495d11f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/8e8b981157be4a6980939cbe4bb38eed, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/c7376d692f9b48b897dce865b69b7145, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/595c943bbefc4437903d667793aa89f2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/96342b60cb2d41d38f512963ae1e9b7b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/63c8f07739054d2aac4527528f7aab16, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/c871ca70e34a4a7f85f8d04407e9149b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/a3b2f2d2559646d883b2b5cb82f31f88, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/a07e2a77a6a84c979d9afc9a9598d82c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/418eb8289ee240b39fc64542bac092d9, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/2d1a934a2e0a4c1da9ece6e9914d7d2c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/55be4469b0fa4ac88889695280ad4970, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/1ecd5a0bf71a4782af27eaceeae1dcfe, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/773fa8b7dfb54bb39bbed235bc4afc76, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/5404dd5deb9542819e5d35c073651387, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/75f6d419134d4a88b5a48e085ce0e3a7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/6252ea920828495f9f23e5b35e2267c7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/856a3acfffaf42da90928019d1ebe3b4] to archive 2024-11-22T19:25:00,465 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T19:25:00,466 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/b8180b41d77b41d0949b2bb131963b3c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/b8180b41d77b41d0949b2bb131963b3c 2024-11-22T19:25:00,467 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/139a876358cf4fad85496d87ce7d5651 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/139a876358cf4fad85496d87ce7d5651 2024-11-22T19:25:00,468 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/e390eb3dc56b4c2a9f7148c36aaf4b0c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/e390eb3dc56b4c2a9f7148c36aaf4b0c 2024-11-22T19:25:00,468 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/a8b5475a989f4207baa694b096958f3c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/a8b5475a989f4207baa694b096958f3c 2024-11-22T19:25:00,469 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/b8820c493d324c0d8d30468cf4f68cd4 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/b8820c493d324c0d8d30468cf4f68cd4 2024-11-22T19:25:00,470 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/ef2603f59124464c882e074445a1388a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/ef2603f59124464c882e074445a1388a 2024-11-22T19:25:00,471 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/c3f7234f6c9f408789191ceb4d7fe44c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/c3f7234f6c9f408789191ceb4d7fe44c 2024-11-22T19:25:00,471 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/d08714a47e9348728596084aaf096328 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/d08714a47e9348728596084aaf096328 2024-11-22T19:25:00,472 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/b00295514887438fa396758c7496d90f to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/b00295514887438fa396758c7496d90f 2024-11-22T19:25:00,473 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/2d56608fc61b4f2fa76a6521c495d11f to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/2d56608fc61b4f2fa76a6521c495d11f 2024-11-22T19:25:00,473 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/8e8b981157be4a6980939cbe4bb38eed to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/8e8b981157be4a6980939cbe4bb38eed 2024-11-22T19:25:00,474 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/c7376d692f9b48b897dce865b69b7145 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/c7376d692f9b48b897dce865b69b7145 2024-11-22T19:25:00,475 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/595c943bbefc4437903d667793aa89f2 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/595c943bbefc4437903d667793aa89f2 2024-11-22T19:25:00,475 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/96342b60cb2d41d38f512963ae1e9b7b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/96342b60cb2d41d38f512963ae1e9b7b 2024-11-22T19:25:00,476 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/63c8f07739054d2aac4527528f7aab16 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/63c8f07739054d2aac4527528f7aab16 2024-11-22T19:25:00,477 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/c871ca70e34a4a7f85f8d04407e9149b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/c871ca70e34a4a7f85f8d04407e9149b 2024-11-22T19:25:00,478 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/a3b2f2d2559646d883b2b5cb82f31f88 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/a3b2f2d2559646d883b2b5cb82f31f88 2024-11-22T19:25:00,478 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/a07e2a77a6a84c979d9afc9a9598d82c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/a07e2a77a6a84c979d9afc9a9598d82c 2024-11-22T19:25:00,479 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/418eb8289ee240b39fc64542bac092d9 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/418eb8289ee240b39fc64542bac092d9 2024-11-22T19:25:00,480 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/2d1a934a2e0a4c1da9ece6e9914d7d2c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/2d1a934a2e0a4c1da9ece6e9914d7d2c 2024-11-22T19:25:00,480 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/55be4469b0fa4ac88889695280ad4970 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/55be4469b0fa4ac88889695280ad4970 2024-11-22T19:25:00,481 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/1ecd5a0bf71a4782af27eaceeae1dcfe to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/1ecd5a0bf71a4782af27eaceeae1dcfe 2024-11-22T19:25:00,482 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/773fa8b7dfb54bb39bbed235bc4afc76 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/773fa8b7dfb54bb39bbed235bc4afc76 2024-11-22T19:25:00,483 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/5404dd5deb9542819e5d35c073651387 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/5404dd5deb9542819e5d35c073651387 2024-11-22T19:25:00,484 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/75f6d419134d4a88b5a48e085ce0e3a7 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/75f6d419134d4a88b5a48e085ce0e3a7 2024-11-22T19:25:00,484 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/6252ea920828495f9f23e5b35e2267c7 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/6252ea920828495f9f23e5b35e2267c7 2024-11-22T19:25:00,485 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/856a3acfffaf42da90928019d1ebe3b4 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/856a3acfffaf42da90928019d1ebe3b4 2024-11-22T19:25:00,486 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/6f9cf7bfefb3436695b5d52aa5f0c2e0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/c5056491f20347c59665d3fa17c4b19c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/103b96bfbfa84183be247c3a8a5de674, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/1f855c725ace4e9c8928739e5d18a9e4, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/a3624074c1504e318d92c1d075bfe8b3, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/7461e2b7cd1a46e3a786d2f9afeed61e, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/5ff71c9465d14526aeb1021cf5204f59, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/3eea0274293d4d69b73551ca1713049f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/fbb2eb9e28a945a6b885bdcc6634e2be, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/42262c42b2d74291ba4f4a139d19dc68, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/f8b23b87104f43d2ba1c17444f3efccb, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/7e7c8811c6d44f84b84f5e3072819507, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/e0f312b925ca4eb0b672ed71298e9b88, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/60d374b4e9a04f48b81a64287ab05f5b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/ad677676d9dc4784a81ca1bb7b14a82a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/3ab3ddb258a142b0936d0c8f36fcdde3, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/77a798b83ad24a2fb87299c2af76a793, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/92058d9bb89f4a609219171d32f59588, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/b06968f7333148aa9c56b8f9b6e9321d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/3a26c614d04f49b59981095a1687fc0b, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/74e19a8217954c37a20e4e5b6e2a76e6, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/64d0079a93b84d6c995c608d46337039, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/7365ab6019ba418882b9b7090cb95840, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/dca70a1e463b47dda3e36b5c1faa4d23, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/28db7323a9434b0cbc337cb1045b3fc0, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/cb6c03f1a5c8476d945916b275512385, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/8ecade6fa29f4c34b360cede996192e1] to archive 2024-11-22T19:25:00,487 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T19:25:00,488 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/6f9cf7bfefb3436695b5d52aa5f0c2e0 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/6f9cf7bfefb3436695b5d52aa5f0c2e0 2024-11-22T19:25:00,489 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/c5056491f20347c59665d3fa17c4b19c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/c5056491f20347c59665d3fa17c4b19c 2024-11-22T19:25:00,490 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/103b96bfbfa84183be247c3a8a5de674 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/103b96bfbfa84183be247c3a8a5de674 2024-11-22T19:25:00,491 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/1f855c725ace4e9c8928739e5d18a9e4 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/1f855c725ace4e9c8928739e5d18a9e4 2024-11-22T19:25:00,491 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/a3624074c1504e318d92c1d075bfe8b3 to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/a3624074c1504e318d92c1d075bfe8b3 2024-11-22T19:25:00,492 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/7461e2b7cd1a46e3a786d2f9afeed61e to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/7461e2b7cd1a46e3a786d2f9afeed61e 2024-11-22T19:25:00,493 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/5ff71c9465d14526aeb1021cf5204f59 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/5ff71c9465d14526aeb1021cf5204f59 2024-11-22T19:25:00,494 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/3eea0274293d4d69b73551ca1713049f to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/3eea0274293d4d69b73551ca1713049f 2024-11-22T19:25:00,494 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/fbb2eb9e28a945a6b885bdcc6634e2be to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/fbb2eb9e28a945a6b885bdcc6634e2be 2024-11-22T19:25:00,495 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/42262c42b2d74291ba4f4a139d19dc68 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/42262c42b2d74291ba4f4a139d19dc68 2024-11-22T19:25:00,496 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/f8b23b87104f43d2ba1c17444f3efccb to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/f8b23b87104f43d2ba1c17444f3efccb 2024-11-22T19:25:00,496 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/7e7c8811c6d44f84b84f5e3072819507 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/7e7c8811c6d44f84b84f5e3072819507 2024-11-22T19:25:00,497 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/e0f312b925ca4eb0b672ed71298e9b88 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/e0f312b925ca4eb0b672ed71298e9b88 2024-11-22T19:25:00,498 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/60d374b4e9a04f48b81a64287ab05f5b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/60d374b4e9a04f48b81a64287ab05f5b 2024-11-22T19:25:00,498 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/ad677676d9dc4784a81ca1bb7b14a82a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/ad677676d9dc4784a81ca1bb7b14a82a 2024-11-22T19:25:00,499 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/3ab3ddb258a142b0936d0c8f36fcdde3 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/3ab3ddb258a142b0936d0c8f36fcdde3 2024-11-22T19:25:00,500 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/77a798b83ad24a2fb87299c2af76a793 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/77a798b83ad24a2fb87299c2af76a793 2024-11-22T19:25:00,501 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/92058d9bb89f4a609219171d32f59588 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/92058d9bb89f4a609219171d32f59588 2024-11-22T19:25:00,502 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/b06968f7333148aa9c56b8f9b6e9321d to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/b06968f7333148aa9c56b8f9b6e9321d 2024-11-22T19:25:00,503 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/3a26c614d04f49b59981095a1687fc0b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/3a26c614d04f49b59981095a1687fc0b 2024-11-22T19:25:00,504 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/74e19a8217954c37a20e4e5b6e2a76e6 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/74e19a8217954c37a20e4e5b6e2a76e6 2024-11-22T19:25:00,504 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/64d0079a93b84d6c995c608d46337039 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/64d0079a93b84d6c995c608d46337039 2024-11-22T19:25:00,505 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/7365ab6019ba418882b9b7090cb95840 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/7365ab6019ba418882b9b7090cb95840 2024-11-22T19:25:00,506 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/dca70a1e463b47dda3e36b5c1faa4d23 to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/dca70a1e463b47dda3e36b5c1faa4d23 2024-11-22T19:25:00,507 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/28db7323a9434b0cbc337cb1045b3fc0 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/28db7323a9434b0cbc337cb1045b3fc0 2024-11-22T19:25:00,508 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/cb6c03f1a5c8476d945916b275512385 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/cb6c03f1a5c8476d945916b275512385 2024-11-22T19:25:00,509 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/8ecade6fa29f4c34b360cede996192e1 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/8ecade6fa29f4c34b360cede996192e1 2024-11-22T19:25:00,510 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/99449147689d4c738948171c4276c064, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/8ccfbd7be07641afb7c4d90ccf2eeafc, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/491689f4e07141c7b9d9b4b24fc388d2, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/02f474b7e90143d3b044f3df477a2efd, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/2a5c1f2059a54e5fbccbf2a20c0760bc, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/effde2e30a6045e4a0371768d7ad799c, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/948338b6a64a4fa0ae78dfc345006473, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/dfcb1b9699dd4deb9af8d06464fe56ef, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/ca6873a9826a40448ad8e16ac3e80557, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/ed7324cf9fd5409c94b0dff38e58e0cb, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/f338defc9282438d8e719fdd90ccc9f7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/a7cefca04fc542fbace412e6c8bc57cc, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/ac50441093234a3abeeebedfd77eb394, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/85ca0cd18e924d5b885aa53b27c88494, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/d8ea985abb3e4af79137616266713ed7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/303c5b997acc4165ac28160853247fc1, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/909510e8480140769f236960968b8582, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/5bd929e324e040ab9932900286eff6b7, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/307f2367c9ad43088edc25f77e8a4f90, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/806ef72bbff042398bac54a284449057, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/e40927fb8fc54e6bb50e20f9bd852c0a, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/0762add977ac49f59abad778a8330cce, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/c4a99886c3b047e2a66b975eb781726f, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/4d8a648ef9264cf4a51bdcbd2e67667d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/57475e5194ce40d6b700e58a22bb127d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/d750243786c04bd3b87221aea1bfea1d, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/08b5df0af50d4e6d864595bdc449984b] to archive 2024-11-22T19:25:00,510 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T19:25:00,511 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/99449147689d4c738948171c4276c064 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/99449147689d4c738948171c4276c064 2024-11-22T19:25:00,512 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/8ccfbd7be07641afb7c4d90ccf2eeafc to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/8ccfbd7be07641afb7c4d90ccf2eeafc 2024-11-22T19:25:00,513 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/491689f4e07141c7b9d9b4b24fc388d2 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/491689f4e07141c7b9d9b4b24fc388d2 2024-11-22T19:25:00,514 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/02f474b7e90143d3b044f3df477a2efd to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/02f474b7e90143d3b044f3df477a2efd 2024-11-22T19:25:00,514 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/2a5c1f2059a54e5fbccbf2a20c0760bc to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/2a5c1f2059a54e5fbccbf2a20c0760bc 2024-11-22T19:25:00,515 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/effde2e30a6045e4a0371768d7ad799c to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/effde2e30a6045e4a0371768d7ad799c 2024-11-22T19:25:00,516 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/948338b6a64a4fa0ae78dfc345006473 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/948338b6a64a4fa0ae78dfc345006473 2024-11-22T19:25:00,517 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/dfcb1b9699dd4deb9af8d06464fe56ef to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/dfcb1b9699dd4deb9af8d06464fe56ef 2024-11-22T19:25:00,518 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/ca6873a9826a40448ad8e16ac3e80557 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/ca6873a9826a40448ad8e16ac3e80557 2024-11-22T19:25:00,518 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/ed7324cf9fd5409c94b0dff38e58e0cb to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/ed7324cf9fd5409c94b0dff38e58e0cb 2024-11-22T19:25:00,519 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/f338defc9282438d8e719fdd90ccc9f7 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/f338defc9282438d8e719fdd90ccc9f7 2024-11-22T19:25:00,520 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/a7cefca04fc542fbace412e6c8bc57cc to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/a7cefca04fc542fbace412e6c8bc57cc 2024-11-22T19:25:00,521 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/ac50441093234a3abeeebedfd77eb394 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/ac50441093234a3abeeebedfd77eb394 2024-11-22T19:25:00,521 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/85ca0cd18e924d5b885aa53b27c88494 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/85ca0cd18e924d5b885aa53b27c88494 2024-11-22T19:25:00,522 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/d8ea985abb3e4af79137616266713ed7 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/d8ea985abb3e4af79137616266713ed7 2024-11-22T19:25:00,523 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/303c5b997acc4165ac28160853247fc1 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/303c5b997acc4165ac28160853247fc1 2024-11-22T19:25:00,523 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/909510e8480140769f236960968b8582 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/909510e8480140769f236960968b8582 2024-11-22T19:25:00,524 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/5bd929e324e040ab9932900286eff6b7 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/5bd929e324e040ab9932900286eff6b7 2024-11-22T19:25:00,525 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/307f2367c9ad43088edc25f77e8a4f90 to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/307f2367c9ad43088edc25f77e8a4f90 2024-11-22T19:25:00,526 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/806ef72bbff042398bac54a284449057 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/806ef72bbff042398bac54a284449057 2024-11-22T19:25:00,527 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/e40927fb8fc54e6bb50e20f9bd852c0a to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/e40927fb8fc54e6bb50e20f9bd852c0a 2024-11-22T19:25:00,527 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/0762add977ac49f59abad778a8330cce to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/0762add977ac49f59abad778a8330cce 2024-11-22T19:25:00,528 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/c4a99886c3b047e2a66b975eb781726f to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/c4a99886c3b047e2a66b975eb781726f 2024-11-22T19:25:00,529 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/4d8a648ef9264cf4a51bdcbd2e67667d to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/4d8a648ef9264cf4a51bdcbd2e67667d 2024-11-22T19:25:00,530 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/57475e5194ce40d6b700e58a22bb127d to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/57475e5194ce40d6b700e58a22bb127d 2024-11-22T19:25:00,530 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/d750243786c04bd3b87221aea1bfea1d to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/d750243786c04bd3b87221aea1bfea1d 2024-11-22T19:25:00,531 DEBUG [StoreCloser-TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/08b5df0af50d4e6d864595bdc449984b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/08b5df0af50d4e6d864595bdc449984b 2024-11-22T19:25:00,534 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/recovered.edits/419.seqid, newMaxSeqId=419, maxSeqId=4 2024-11-22T19:25:00,535 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867. 2024-11-22T19:25:00,535 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1635): Region close journal for 457088d1c2889b36850d00978a970867: 2024-11-22T19:25:00,536 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] handler.UnassignRegionHandler(170): Closed 457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,537 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=183 updating hbase:meta row=457088d1c2889b36850d00978a970867, regionState=CLOSED 2024-11-22T19:25:00,538 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=183 2024-11-22T19:25:00,538 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=183, state=SUCCESS; CloseRegionProcedure 457088d1c2889b36850d00978a970867, server=a307a1377457,35917,1732303314657 in 1.8710 sec 2024-11-22T19:25:00,539 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=183, resume processing ppid=182 2024-11-22T19:25:00,539 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, ppid=182, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=457088d1c2889b36850d00978a970867, UNASSIGN in 1.8730 sec 2024-11-22T19:25:00,540 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=181 2024-11-22T19:25:00,540 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=181, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8750 sec 2024-11-22T19:25:00,541 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732303500541"}]},"ts":"1732303500541"} 
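Every HFileArchiver(596) entry above follows the same pattern: a compacted store file is moved from the region's data directory to the matching location under archive/ beneath the same cluster root. A minimal sketch of that path mapping, using plain Java strings rather than the real HFileArchiver API (the example paths reuse values from the first archive entry above; the helper name and string handling are assumptions):

// Illustrative sketch only: mirrors a store file path from <root>/data/... to
// <root>/archive/data/..., which is the relationship the HFileArchiver(596)
// entries above report. Not the actual HBase implementation.
public class ArchivePathSketch {

    static String toArchivePath(String rootDir, String storeFile) {
        // storeFile is expected to start with rootDir, e.g.
        //   <root>/data/default/TestAcidGuarantees/<region>/C/<hfile>
        String relative = storeFile.substring(rootDir.length()); // "/data/default/..."
        return rootDir + "/archive" + relative;                  // "<root>/archive/data/default/..."
    }

    public static void main(String[] args) {
        String root = "hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982";
        String src  = root + "/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/99449147689d4c738948171c4276c064";
        System.out.println(toArchivePath(root, src)); // matches the destination in the first archive entry above
    }
}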
2024-11-22T19:25:00,541 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-22T19:25:00,543 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-22T19:25:00,544 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8850 sec 2024-11-22T19:25:00,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-22T19:25:00,764 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 181 completed 2024-11-22T19:25:00,765 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-22T19:25:00,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] procedure2.ProcedureExecutor(1098): Stored pid=185, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:25:00,766 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=185, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:25:00,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-22T19:25:00,766 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=185, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:25:00,767 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,769 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A, FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B, FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C, FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/recovered.edits] 2024-11-22T19:25:00,770 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/25e624e1f0a143d2901e8f0441794199 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/25e624e1f0a143d2901e8f0441794199 2024-11-22T19:25:00,771 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/7437a446b3ad46e7b25f8feb24b1d1f6 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/7437a446b3ad46e7b25f8feb24b1d1f6 2024-11-22T19:25:00,772 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/d084b95d20f94ba584e30be4f82e5499 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/A/d084b95d20f94ba584e30be4f82e5499 2024-11-22T19:25:00,774 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/786f94c9a4204936a52b13ebaa657d80 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/786f94c9a4204936a52b13ebaa657d80 2024-11-22T19:25:00,774 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/b2fade230d8f46b9a2a28d8709d84980 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/b2fade230d8f46b9a2a28d8709d84980 2024-11-22T19:25:00,775 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/cc6be1d51cb34dbb8fe182e01aa27ddf to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/B/cc6be1d51cb34dbb8fe182e01aa27ddf 2024-11-22T19:25:00,776 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/7790caf830b9467295f8797e844a90b9 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/7790caf830b9467295f8797e844a90b9 2024-11-22T19:25:00,777 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/78ca15ab94894cc49ff79cb256326497 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/78ca15ab94894cc49ff79cb256326497 2024-11-22T19:25:00,778 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/cc97d332e0af41bb8e7a6dfc8fd0320b to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/C/cc97d332e0af41bb8e7a6dfc8fd0320b 2024-11-22T19:25:00,780 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/recovered.edits/419.seqid to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867/recovered.edits/419.seqid 2024-11-22T19:25:00,780 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/default/TestAcidGuarantees/457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,780 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-22T19:25:00,781 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-22T19:25:00,781 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-22T19:25:00,783 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112209232fdaaf16403f91270bd5539ac937_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112209232fdaaf16403f91270bd5539ac937_457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,784 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411220d4cb6f7f8d2428bbcfd8aecdb061841_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411220d4cb6f7f8d2428bbcfd8aecdb061841_457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,785 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411221ba98400790d475e919ec5153732c97f_457088d1c2889b36850d00978a970867 to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411221ba98400790d475e919ec5153732c97f_457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,786 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112228b010a5a81043c78e48032717b72343_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112228b010a5a81043c78e48032717b72343_457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,787 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122336fd31563bc4d13ad46545da3c186df_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122336fd31563bc4d13ad46545da3c186df_457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,788 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122398bbdb8b9b14fe189c09c5909ceff12_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122398bbdb8b9b14fe189c09c5909ceff12_457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,788 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411224ac68bf1cca541789659134665c3a400_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411224ac68bf1cca541789659134665c3a400_457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,789 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122688fabe9137d46c7b82cdb50d24bf5a2_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122688fabe9137d46c7b82cdb50d24bf5a2_457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,790 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411226a3656e7705e4509ab01f58eea5bd38e_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411226a3656e7705e4509ab01f58eea5bd38e_457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,791 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112281ff00e9874848059d1cca262a009218_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112281ff00e9874848059d1cca262a009218_457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,792 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411228a99cc280066425b8dd3a3ec31fe83de_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411228a99cc280066425b8dd3a3ec31fe83de_457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,792 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411229b3d5384efab4a0f8d7000c95893567a_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411229b3d5384efab4a0f8d7000c95893567a_457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,793 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411229bc5cdb7cf8e4b1aa2a1bcb270c569ed_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411229bc5cdb7cf8e4b1aa2a1bcb270c569ed_457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,794 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a7559c560ac44165a205076a2a119e23_457088d1c2889b36850d00978a970867 to 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a7559c560ac44165a205076a2a119e23_457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,795 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122ad952cb6154a4620b16aad7800f1cbb1_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122ad952cb6154a4620b16aad7800f1cbb1_457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,796 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122b03684c97fef403fab2c13b7df2db3ab_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122b03684c97fef403fab2c13b7df2db3ab_457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,797 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122b3b9c679ac3443bb9c4d5c63dfaf7603_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122b3b9c679ac3443bb9c4d5c63dfaf7603_457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,797 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122c091f514fe5249d1b3444b41b18e2e01_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122c091f514fe5249d1b3444b41b18e2e01_457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,798 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122c638da91324146a8b2973f95a5887eec_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122c638da91324146a8b2973f95a5887eec_457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,799 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122cdca6c4d708e41d39fbefd693aa63bd8_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122cdca6c4d708e41d39fbefd693aa63bd8_457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,800 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122eed753f6eb23478f9869680c4d6bc8ea_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122eed753f6eb23478f9869680c4d6bc8ea_457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,801 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122fbbd7943cc1e448aacdc54559761882a_457088d1c2889b36850d00978a970867 to hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122fbbd7943cc1e448aacdc54559761882a_457088d1c2889b36850d00978a970867 2024-11-22T19:25:00,801 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-22T19:25:00,803 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=185, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:25:00,804 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-22T19:25:00,806 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-22T19:25:00,806 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=185, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:25:00,807 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-22T19:25:00,807 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732303500807"}]},"ts":"9223372036854775807"} 2024-11-22T19:25:00,808 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-22T19:25:00,808 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 457088d1c2889b36850d00978a970867, NAME => 'TestAcidGuarantees,,1732303470181.457088d1c2889b36850d00978a970867.', STARTKEY => '', ENDKEY => ''}] 2024-11-22T19:25:00,808 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-22T19:25:00,808 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732303500808"}]},"ts":"9223372036854775807"} 2024-11-22T19:25:00,809 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-22T19:25:00,811 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=185, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T19:25:00,811 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 46 msec 2024-11-22T19:25:00,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38701 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-22T19:25:00,867 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 185 completed 2024-11-22T19:25:00,876 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=236 (was 237), OpenFileDescriptor=451 (was 453), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=326 (was 433), ProcessCount=11 (was 11), AvailableMemoryMB=4749 (was 4799) 2024-11-22T19:25:00,876 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-22T19:25:00,876 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-22T19:25:00,876 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7e541e88 to 127.0.0.1:57120 2024-11-22T19:25:00,877 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:25:00,877 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T19:25:00,877 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1390920325, stopped=false 2024-11-22T19:25:00,877 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=a307a1377457,38701,1732303313442 2024-11-22T19:25:00,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T19:25:00,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35917-0x10020ae8f450001, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/running 2024-11-22T19:25:00,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T19:25:00,879 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-22T19:25:00,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35917-0x10020ae8f450001, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T19:25:00,879 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:25:00,879 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'a307a1377457,35917,1732303314657' ***** 2024-11-22T19:25:00,879 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-22T19:25:00,879 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T19:25:00,879 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35917-0x10020ae8f450001, quorum=127.0.0.1:57120, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T19:25:00,880 INFO [RS:0;a307a1377457:35917 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T19:25:00,880 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-22T19:25:00,880 INFO [RS:0;a307a1377457:35917 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T19:25:00,880 INFO [RS:0;a307a1377457:35917 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T19:25:00,880 INFO [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(3579): Received CLOSE for 45aa664165800f3151e26f1a3610c687 2024-11-22T19:25:00,880 INFO [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(1224): stopping server a307a1377457,35917,1732303314657 2024-11-22T19:25:00,880 DEBUG [RS:0;a307a1377457:35917 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:25:00,880 INFO [RS:0;a307a1377457:35917 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T19:25:00,880 INFO [RS:0;a307a1377457:35917 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T19:25:00,880 INFO [RS:0;a307a1377457:35917 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
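Two client operations drive everything logged between the store-closer entries and this point: the DISABLE that completed as procId 181 and the DELETE that completed as procId 185 (DeleteTableProcedure archiving the region and MOB directories, then cleaning hbase:meta). A minimal client-side sketch of those calls, assuming a connection to this test cluster; only the table name is taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumed to point at the test cluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // A table has to be disabled before it can be deleted; the master runs
            // DisableTableProcedure and then DeleteTableProcedure for these calls.
            if (admin.isTableEnabled(table)) {
                admin.disableTable(table);
            }
            admin.deleteTable(table);
        }
    }
}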
2024-11-22T19:25:00,881 INFO [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-22T19:25:00,881 INFO [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-22T19:25:00,881 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 45aa664165800f3151e26f1a3610c687, disabling compactions & flushes 2024-11-22T19:25:00,881 DEBUG [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(1603): Online Regions={45aa664165800f3151e26f1a3610c687=hbase:namespace,,1732303318362.45aa664165800f3151e26f1a3610c687., 1588230740=hbase:meta,,1.1588230740} 2024-11-22T19:25:00,881 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732303318362.45aa664165800f3151e26f1a3610c687. 2024-11-22T19:25:00,881 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732303318362.45aa664165800f3151e26f1a3610c687. 2024-11-22T19:25:00,881 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732303318362.45aa664165800f3151e26f1a3610c687. after waiting 0 ms 2024-11-22T19:25:00,881 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732303318362.45aa664165800f3151e26f1a3610c687. 2024-11-22T19:25:00,881 DEBUG [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-22T19:25:00,881 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 45aa664165800f3151e26f1a3610c687 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-22T19:25:00,881 INFO [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-22T19:25:00,881 DEBUG [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-22T19:25:00,881 DEBUG [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T19:25:00,881 DEBUG [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T19:25:00,881 INFO [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-22T19:25:00,884 DEBUG [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 45aa664165800f3151e26f1a3610c687 2024-11-22T19:25:00,902 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/namespace/45aa664165800f3151e26f1a3610c687/.tmp/info/64e301e42e5c4657822de5ef9ae561b3 is 45, key is 
default/info:d/1732303319831/Put/seqid=0 2024-11-22T19:25:00,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742577_1753 (size=5037) 2024-11-22T19:25:00,912 DEBUG [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/meta/1588230740/.tmp/info/c772872268164553b2047653e14b295d is 143, key is hbase:namespace,,1732303318362.45aa664165800f3151e26f1a3610c687./info:regioninfo/1732303319661/Put/seqid=0 2024-11-22T19:25:00,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742578_1754 (size=7725) 2024-11-22T19:25:00,918 INFO [regionserver/a307a1377457:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T19:25:00,929 INFO [regionserver/a307a1377457:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T19:25:00,929 INFO [regionserver/a307a1377457:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T19:25:01,084 DEBUG [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 45aa664165800f3151e26f1a3610c687 2024-11-22T19:25:01,285 DEBUG [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 45aa664165800f3151e26f1a3610c687 2024-11-22T19:25:01,306 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/namespace/45aa664165800f3151e26f1a3610c687/.tmp/info/64e301e42e5c4657822de5ef9ae561b3 2024-11-22T19:25:01,309 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/namespace/45aa664165800f3151e26f1a3610c687/.tmp/info/64e301e42e5c4657822de5ef9ae561b3 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/namespace/45aa664165800f3151e26f1a3610c687/info/64e301e42e5c4657822de5ef9ae561b3 2024-11-22T19:25:01,311 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/namespace/45aa664165800f3151e26f1a3610c687/info/64e301e42e5c4657822de5ef9ae561b3, entries=2, sequenceid=6, filesize=4.9 K 2024-11-22T19:25:01,312 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 45aa664165800f3151e26f1a3610c687 in 431ms, sequenceid=6, compaction requested=false 2024-11-22T19:25:01,315 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/namespace/45aa664165800f3151e26f1a3610c687/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T19:25:01,315 INFO [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION}] 
regionserver.HRegion(1922): Closed hbase:namespace,,1732303318362.45aa664165800f3151e26f1a3610c687. 2024-11-22T19:25:01,315 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 45aa664165800f3151e26f1a3610c687: 2024-11-22T19:25:01,315 DEBUG [RS_CLOSE_REGION-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1732303318362.45aa664165800f3151e26f1a3610c687. 2024-11-22T19:25:01,316 INFO [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/meta/1588230740/.tmp/info/c772872268164553b2047653e14b295d 2024-11-22T19:25:01,332 DEBUG [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/meta/1588230740/.tmp/rep_barrier/3ec67010ee064f08a868d700d01c565f is 102, key is TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e./rep_barrier:/1732303346296/DeleteFamily/seqid=0 2024-11-22T19:25:01,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742579_1755 (size=6025) 2024-11-22T19:25:01,485 DEBUG [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-22T19:25:01,685 DEBUG [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-22T19:25:01,736 INFO [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/meta/1588230740/.tmp/rep_barrier/3ec67010ee064f08a868d700d01c565f 2024-11-22T19:25:01,753 DEBUG [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/meta/1588230740/.tmp/table/9461d9577c914e1abd51265d17bfbf83 is 96, key is TestAcidGuarantees,,1732303320121.686ebaaf5a8e3b2d28eef9abb3c2302e./table:/1732303346296/DeleteFamily/seqid=0 2024-11-22T19:25:01,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742580_1756 (size=5942) 2024-11-22T19:25:01,885 INFO [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-11-22T19:25:01,885 DEBUG [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-22T19:25:01,885 DEBUG [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-22T19:25:02,086 DEBUG [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-22T19:25:02,157 INFO [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), 
to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/meta/1588230740/.tmp/table/9461d9577c914e1abd51265d17bfbf83 2024-11-22T19:25:02,160 DEBUG [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/meta/1588230740/.tmp/info/c772872268164553b2047653e14b295d as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/meta/1588230740/info/c772872268164553b2047653e14b295d 2024-11-22T19:25:02,164 INFO [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/meta/1588230740/info/c772872268164553b2047653e14b295d, entries=22, sequenceid=93, filesize=7.5 K 2024-11-22T19:25:02,164 DEBUG [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/meta/1588230740/.tmp/rep_barrier/3ec67010ee064f08a868d700d01c565f as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/meta/1588230740/rep_barrier/3ec67010ee064f08a868d700d01c565f 2024-11-22T19:25:02,167 INFO [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/meta/1588230740/rep_barrier/3ec67010ee064f08a868d700d01c565f, entries=6, sequenceid=93, filesize=5.9 K 2024-11-22T19:25:02,167 DEBUG [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/meta/1588230740/.tmp/table/9461d9577c914e1abd51265d17bfbf83 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/meta/1588230740/table/9461d9577c914e1abd51265d17bfbf83 2024-11-22T19:25:02,170 INFO [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/meta/1588230740/table/9461d9577c914e1abd51265d17bfbf83, entries=9, sequenceid=93, filesize=5.8 K 2024-11-22T19:25:02,170 INFO [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1289ms, sequenceid=93, compaction requested=false 2024-11-22T19:25:02,174 DEBUG [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-22T19:25:02,174 DEBUG [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T19:25:02,174 INFO [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed 
hbase:meta,,1.1588230740 2024-11-22T19:25:02,174 DEBUG [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-22T19:25:02,174 DEBUG [RS_CLOSE_META-regionserver/a307a1377457:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T19:25:02,286 INFO [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(1250): stopping server a307a1377457,35917,1732303314657; all regions closed. 2024-11-22T19:25:02,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741834_1010 (size=26050) 2024-11-22T19:25:02,292 DEBUG [RS:0;a307a1377457:35917 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/oldWALs 2024-11-22T19:25:02,292 INFO [RS:0;a307a1377457:35917 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL a307a1377457%2C35917%2C1732303314657.meta:.meta(num 1732303317995) 2024-11-22T19:25:02,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741832_1008 (size=17412149) 2024-11-22T19:25:02,295 DEBUG [RS:0;a307a1377457:35917 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/oldWALs 2024-11-22T19:25:02,296 INFO [RS:0;a307a1377457:35917 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL a307a1377457%2C35917%2C1732303314657:(num 1732303317054) 2024-11-22T19:25:02,296 DEBUG [RS:0;a307a1377457:35917 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:25:02,296 INFO [RS:0;a307a1377457:35917 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T19:25:02,296 INFO [RS:0;a307a1377457:35917 {}] hbase.ChoreService(370): Chore service for: regionserver/a307a1377457:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-11-22T19:25:02,296 INFO [regionserver/a307a1377457:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
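By this point the lone region server has flushed hbase:namespace and hbase:meta, moved its WALs to oldWALs, and is about to exit; the master teardown follows. The whole sequence is the effect of the "Shutting down minicluster" step noted earlier. A minimal sketch of that test lifecycle, assuming the standard HBaseTestingUtility API with a placeholder test body:

import org.apache.hadoop.hbase.HBaseTestingUtility;

public class MiniClusterLifecycleSketch {
    public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // Starts ZooKeeper, a mini-DFS and an HBase cluster (here: one master, one region server).
        util.startMiniCluster();
        try {
            // ... test workload would run here against util.getConnection() ...
        } finally {
            // Produces the STOPPING/STOPPED region-server and master sequence seen in this log.
            util.shutdownMiniCluster();
        }
    }
}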
2024-11-22T19:25:02,297 INFO [RS:0;a307a1377457:35917 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:35917 2024-11-22T19:25:02,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35917-0x10020ae8f450001, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a307a1377457,35917,1732303314657 2024-11-22T19:25:02,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T19:25:02,302 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a307a1377457,35917,1732303314657] 2024-11-22T19:25:02,302 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing a307a1377457,35917,1732303314657; numProcessing=1 2024-11-22T19:25:02,303 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/a307a1377457,35917,1732303314657 already deleted, retry=false 2024-11-22T19:25:02,304 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; a307a1377457,35917,1732303314657 expired; onlineServers=0 2024-11-22T19:25:02,304 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'a307a1377457,38701,1732303313442' ***** 2024-11-22T19:25:02,304 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T19:25:02,304 DEBUG [M:0;a307a1377457:38701 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b2972ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a307a1377457/172.17.0.2:0 2024-11-22T19:25:02,304 INFO [M:0;a307a1377457:38701 {}] regionserver.HRegionServer(1224): stopping server a307a1377457,38701,1732303313442 2024-11-22T19:25:02,304 INFO [M:0;a307a1377457:38701 {}] regionserver.HRegionServer(1250): stopping server a307a1377457,38701,1732303313442; all regions closed. 2024-11-22T19:25:02,304 DEBUG [M:0;a307a1377457:38701 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T19:25:02,304 DEBUG [M:0;a307a1377457:38701 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T19:25:02,304 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T19:25:02,304 DEBUG [M:0;a307a1377457:38701 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-22T19:25:02,304 DEBUG [master/a307a1377457:0:becomeActiveMaster-HFileCleaner.large.0-1732303316707 {}] cleaner.HFileCleaner(306): Exit Thread[master/a307a1377457:0:becomeActiveMaster-HFileCleaner.large.0-1732303316707,5,FailOnTimeoutGroup]
2024-11-22T19:25:02,304 DEBUG [master/a307a1377457:0:becomeActiveMaster-HFileCleaner.small.0-1732303316720 {}] cleaner.HFileCleaner(306): Exit Thread[master/a307a1377457:0:becomeActiveMaster-HFileCleaner.small.0-1732303316720,5,FailOnTimeoutGroup]
2024-11-22T19:25:02,304 INFO [M:0;a307a1377457:38701 {}] hbase.ChoreService(370): Chore service for: master/a307a1377457:0 had [] on shutdown
2024-11-22T19:25:02,305 DEBUG [M:0;a307a1377457:38701 {}] master.HMaster(1733): Stopping service threads
2024-11-22T19:25:02,305 INFO [M:0;a307a1377457:38701 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-22T19:25:02,305 ERROR [M:0;a307a1377457:38701 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-6,5,PEWorkerGroup]
2024-11-22T19:25:02,305 INFO [M:0;a307a1377457:38701 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-22T19:25:02,305 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-22T19:25:02,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-22T19:25:02,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-22T19:25:02,306 DEBUG [M:0;a307a1377457:38701 {}] zookeeper.ZKUtil(347): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-22T19:25:02,306 WARN [M:0;a307a1377457:38701 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-22T19:25:02,306 INFO [M:0;a307a1377457:38701 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-11-22T19:25:02,306 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-22T19:25:02,306 INFO [M:0;a307a1377457:38701 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-22T19:25:02,306 DEBUG [M:0;a307a1377457:38701 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-22T19:25:02,306 INFO [M:0;a307a1377457:38701 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-22T19:25:02,306 DEBUG [M:0;a307a1377457:38701 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-22T19:25:02,306 DEBUG [M:0;a307a1377457:38701 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-22T19:25:02,306 DEBUG [M:0;a307a1377457:38701 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-22T19:25:02,306 INFO [M:0;a307a1377457:38701 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=795.70 KB heapSize=980.48 KB
2024-11-22T19:25:02,322 DEBUG [M:0;a307a1377457:38701 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/eda649dc732f4ccb966dad25bafc982f is 82, key is hbase:meta,,1/info:regioninfo/1732303318214/Put/seqid=0
2024-11-22T19:25:02,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742581_1757 (size=5672)
2024-11-22T19:25:02,402 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35917-0x10020ae8f450001, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-22T19:25:02,402 INFO [RS:0;a307a1377457:35917 {}] regionserver.HRegionServer(1307): Exiting; stopping=a307a1377457,35917,1732303314657; zookeeper connection closed.
2024-11-22T19:25:02,402 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35917-0x10020ae8f450001, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-22T19:25:02,403 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@cb73747 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@cb73747
2024-11-22T19:25:02,403 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-22T19:25:02,725 INFO [M:0;a307a1377457:38701 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2278 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/eda649dc732f4ccb966dad25bafc982f
2024-11-22T19:25:02,745 DEBUG [M:0;a307a1377457:38701 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2277e1263de3471faf3138085c7383ab is 2285, key is \x00\x00\x00\x00\x00\x00\x00\x9E/proc:d/1732303473196/Put/seqid=0
2024-11-22T19:25:02,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742582_1758 (size=46166)
2024-11-22T19:25:03,150 INFO [M:0;a307a1377457:38701 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=795.14 KB at sequenceid=2278 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2277e1263de3471faf3138085c7383ab
2024-11-22T19:25:03,152 INFO [M:0;a307a1377457:38701 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2277e1263de3471faf3138085c7383ab
2024-11-22T19:25:03,170 DEBUG [M:0;a307a1377457:38701 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5d3be90356f448c7a0bd225f65e21c22 is 69, key is a307a1377457,35917,1732303314657/rs:state/1732303316775/Put/seqid=0
2024-11-22T19:25:03,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073742583_1759 (size=5156)
2024-11-22T19:25:03,573 INFO [M:0;a307a1377457:38701 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2278 (bloomFilter=true), to=hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5d3be90356f448c7a0bd225f65e21c22
2024-11-22T19:25:03,577 DEBUG [M:0;a307a1377457:38701 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/eda649dc732f4ccb966dad25bafc982f as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/eda649dc732f4ccb966dad25bafc982f
2024-11-22T19:25:03,579 INFO [M:0;a307a1377457:38701 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/eda649dc732f4ccb966dad25bafc982f, entries=8, sequenceid=2278, filesize=5.5 K
2024-11-22T19:25:03,580 DEBUG [M:0;a307a1377457:38701 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2277e1263de3471faf3138085c7383ab as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2277e1263de3471faf3138085c7383ab
2024-11-22T19:25:03,583 INFO [M:0;a307a1377457:38701 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2277e1263de3471faf3138085c7383ab
2024-11-22T19:25:03,583 INFO [M:0;a307a1377457:38701 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2277e1263de3471faf3138085c7383ab, entries=185, sequenceid=2278, filesize=45.1 K
2024-11-22T19:25:03,583 DEBUG [M:0;a307a1377457:38701 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5d3be90356f448c7a0bd225f65e21c22 as hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5d3be90356f448c7a0bd225f65e21c22
2024-11-22T19:25:03,586 INFO [M:0;a307a1377457:38701 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44823/user/jenkins/test-data/8d80a3fc-8e51-7af3-cadd-b1b621432982/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5d3be90356f448c7a0bd225f65e21c22, entries=1, sequenceid=2278, filesize=5.0 K
2024-11-22T19:25:03,586 INFO [M:0;a307a1377457:38701 {}] regionserver.HRegion(3040): Finished flush of dataSize ~795.70 KB/814796, heapSize ~980.18 KB/1003704, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1280ms, sequenceid=2278, compaction requested=false
2024-11-22T19:25:03,588 INFO [M:0;a307a1377457:38701 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-22T19:25:03,588 DEBUG [M:0;a307a1377457:38701 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-11-22T19:25:03,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41091 is added to blk_1073741830_1006 (size=964129)
2024-11-22T19:25:03,590 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-22T19:25:03,590 INFO [M:0;a307a1377457:38701 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-11-22T19:25:03,590 INFO [M:0;a307a1377457:38701 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:38701
2024-11-22T19:25:03,592 DEBUG [M:0;a307a1377457:38701 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/a307a1377457,38701,1732303313442 already deleted, retry=false
2024-11-22T19:25:03,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-22T19:25:03,693 INFO [M:0;a307a1377457:38701 {}] regionserver.HRegionServer(1307): Exiting; stopping=a307a1377457,38701,1732303313442; zookeeper connection closed.
2024-11-22T19:25:03,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38701-0x10020ae8f450000, quorum=127.0.0.1:57120, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-22T19:25:03,698 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-22T19:25:03,701 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-22T19:25:03,701 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-22T19:25:03,701 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-22T19:25:03,701 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/hadoop.log.dir/,STOPPED}
2024-11-22T19:25:03,704 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
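The "Committing .tmp/... as ..." entries followed by "Added ..." above show each flushed store file being written under the region's .tmp directory and then moved into its column family directory. Below is a minimal sketch of that write-to-tmp-then-rename commit pattern using only the plain Hadoop FileSystem API; the paths and class name are hypothetical and this is not HBase's actual flush code.

// TmpCommitSketch.java - hedged sketch of the .tmp write + rename commit pattern seen in the log above.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf); // local FS by default; HDFS if fs.defaultFS points at one

    // Hypothetical layout mirroring .../store/<region>/.tmp/<family>/<file> from the log.
    Path tmpFile = new Path("target/tmp-commit-demo/.tmp/info/flushfile");
    Path committed = new Path("target/tmp-commit-demo/info/flushfile");

    // Step 1: write the flush output into the .tmp directory.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.writeBytes("flushed cells would go here");
    }

    // Step 2: commit by renaming into the family directory (atomic on HDFS).
    fs.mkdirs(committed.getParent());
    if (!fs.rename(tmpFile, committed)) {
      throw new IOException("commit failed for " + tmpFile);
    }
    System.out.println("Committed " + committed);
  }
}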
2024-11-22T19:25:03,704 WARN [BP-1459558550-172.17.0.2-1732303309555 heartbeating to localhost/127.0.0.1:44823 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-22T19:25:03,704 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-22T19:25:03,704 WARN [BP-1459558550-172.17.0.2-1732303309555 heartbeating to localhost/127.0.0.1:44823 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1459558550-172.17.0.2-1732303309555 (Datanode Uuid 58be2d5c-3ced-4168-9580-e2f13ac1bdd8) service to localhost/127.0.0.1:44823
2024-11-22T19:25:03,707 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/cluster_36b2230a-feb7-e71f-de54-94974790eab9/dfs/data/data1/current/BP-1459558550-172.17.0.2-1732303309555 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-22T19:25:03,707 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/cluster_36b2230a-feb7-e71f-de54-94974790eab9/dfs/data/data2/current/BP-1459558550-172.17.0.2-1732303309555 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-22T19:25:03,707 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-22T19:25:03,715 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-22T19:25:03,715 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-22T19:25:03,715 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-22T19:25:03,715 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-22T19:25:03,715 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aef0a1d6-0d0d-9e22-faea-0baffbe4b261/hadoop.log.dir/,STOPPED}
2024-11-22T19:25:03,740 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-11-22T19:25:03,883 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
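The closing lines ("Shutdown MiniZK cluster with all ZK servers", "Minicluster is down") come from HBaseTestingUtility tearing down the minicluster after the HBase processes, the mini DFS DataNode and NameNode web contexts have stopped. A minimal sketch, assuming JUnit 4 and the hbase-server test jar on the classpath, of the kind of setup/teardown that produces output like this log is:

// MiniClusterLifecycleTest.java - hedged sketch; the test class and its assertion are illustrative only.
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

public class MiniClusterLifecycleTest {
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUp() throws Exception {
    // Starts a single-node cluster: one master, one region server, plus mini DFS and mini ZK.
    UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    // Stops HBase, then the mini DFS cluster, then the mini ZK cluster,
    // ending with the "Minicluster is down" message seen above.
    UTIL.shutdownMiniCluster();
  }

  @Test
  public void clusterIsUp() throws Exception {
    Assert.assertTrue(UTIL.getHBaseCluster().getMaster().isInitialized());
  }
}

The shutdown ordering in this sketch matches the sequence of messages above: region server and master exit first, the DataNode and NameNode Jetty handlers stop next, and the MiniZK cluster is shut down last.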